/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
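
/* Illustrative arithmetic (added for clarity, not part of the original
 * header): the HW MTU covers the Ethernet header, one VLAN tag and the
 * FCS, i.e. an overhead of 14 + 4 + 4 = 22 bytes. For example:
 *
 *     MLX5E_SW2HW_MTU(1500) == 1522
 *     MLX5E_HW2SW_MTU(1522) == 1500
 */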
#define MLX5E_MAX_NUM_TC 8

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6

#define MLX5_RX_HEADROOM NET_SKB_PAD

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE   BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
                                     MLX5_MPWRQ_WQE_PAGE_ORDER)
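
/* Illustrative arithmetic (added for clarity, assuming 4K pages): each
 * multi-packet WQE spans 2^18 = 256KB, so with PAGE_SHIFT == 12 the page
 * order is 18 - 12 = 6 and MLX5_MPWRQ_PAGES_PER_WQE == 64. With the
 * default 2^6 = 64B stride, one WQE holds 4096 strides.
 */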
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_MTTS(wqes) \
        (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
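
/* Illustrative arithmetic (added for clarity): MTTs are programmed in 16B
 * octwords, each holding two 8B MTT entries, hence the 8-entry alignment
 * and the division by two. E.g. a 64-WQE striding RQ with 64 pages per
 * WQE needs 64 * 64 = 4096 MTTs == 2048 octwords.
 */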
#define MLX5_UMR_ALIGN                    (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT       32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE      4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC          0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS          0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC          0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS          0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                    0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW                0x2

#define MLX5E_LOG_INDIR_RQT_SIZE    0x7
#define MLX5E_INDIR_RQT_SIZE        BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS      0x1
#define MLX5E_MAX_NUM_CHANNELS      (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS           (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET     128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */

#define MLX5E_ICOSQ_MAX_WQEBBS \
        (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
        ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
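
/* Clarifying note (added): an XDP TX descriptor consumes the control and
 * Ethernet segments of a mlx5e_tx_wqe, expressed here in 16B data-segment
 * units, plus one extra data segment for the single scatter/gather entry
 * that points at the packet.
 */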
#define MLX5E_NUM_MAIN_GROUPS 9

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
                             wq_size / 2);
        default:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
                             wq_size / 4);
        }
}
static inline int mlx5_min_log_rq_size(int wq_type)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
        default:
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
        }
}

static inline int mlx5_max_log_rq_size(int wq_type)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
        default:
                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
        }
}
struct mlx5e_tx_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_eth_seg  eth;
};

struct mlx5e_rx_wqe {
        struct mlx5_wqe_srq_next_seg next;
        struct mlx5_wqe_data_seg     data;
};

struct mlx5e_umr_wqe {
        struct mlx5_wqe_ctrl_seg     ctrl;
        struct mlx5_wqe_umr_ctrl_seg uctrl;
        struct mlx5_mkey_seg         mkc;
        struct mlx5_wqe_data_seg     data;
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "rx_cqe_moder",
        "rx_cqe_compress",
};

enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
        MLX5E_PFLAG_RX_CQE_COMPRESS    = (1 << 1),
};

#define MLX5E_SET_PFLAG(params, pflag, enable)                  \
        do {                                                    \
                if (enable)                                     \
                        (params)->pflags |= (pflag);            \
                else                                            \
                        (params)->pflags &= ~(pflag);           \
        } while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
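
/* Usage sketch (added for clarity, not part of the original header):
 *
 *     MLX5E_SET_PFLAG(&priv->channels.params,
 *                     MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *     if (MLX5E_GET_PFLAG(&priv->channels.params,
 *                         MLX5E_PFLAG_RX_CQE_COMPRESS))
 *             ...;
 *
 * The do/while (0) wrapper lets the macro behave as a single statement
 * inside un-braced if/else bodies.
 */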
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

struct mlx5e_cq_moder {
        u16 usec;
        u16 pkts;
};

struct mlx5e_params {
        u8  log_sq_size;
        u8  rq_wq_type;
        u8  mpwqe_log_stride_sz;
        u8  mpwqe_log_num_strides;
        u8  log_rq_size;
        u16 num_channels;
        u8  num_tc;
        u8  rx_cq_period_mode;
        bool rx_cqe_compress_def;
        struct mlx5e_cq_moder rx_cq_moderation;
        struct mlx5e_cq_moder tx_cq_moderation;
        bool lro_en;
        u8  tx_min_inline_mode;
        u8  rss_hfunc;
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        bool vlan_strip_disable;
        bool rx_am_enabled;
        u32 lro_timeout;
        u32 pflags;
        struct bpf_prog *xdp_prog;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
        /* bw pct for priority group */
        u8   pg_bw_pct[CEE_DCBX_MAX_PGS];
        u8   prio_to_pg_map[CEE_DCBX_MAX_PRIO];
        bool pfc_setting[CEE_DCBX_MAX_PRIO];
        bool pfc_enable;
};

enum {
        MLX5_DCB_CHG_RESET,
        MLX5_DCB_CHG_NO_RESET,
};

struct mlx5e_dcbx {
        enum mlx5_dcbx_oper_mode mode;
        struct mlx5e_cee_config  cee_cfg; /* pending configuration */

        /* The only setting that cannot be read from FW */
        u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
};
#endif

struct mlx5e_tstamp {
        rwlock_t                 lock;
        struct cyclecounter      cycles;
        struct timecounter       clock;
        struct hwtstamp_config   hwtstamp_config;
        u32                      nominal_c_mult;
        unsigned long            overflow_period;
        struct delayed_work      overflow_work;
        struct mlx5_core_dev    *mdev;
        struct ptp_clock        *ptp;
        struct ptp_clock_info    ptp_info;
        u8                      *pps_pin_caps;
};

enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
        MLX5E_RQ_STATE_AM,
};
struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq           wq;

        /* data path - accessed per napi poll */
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
        struct mlx5e_channel      *channel;

        /* cqe decompression */
        struct mlx5_cqe64          title;
        struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
        u8                         mini_arr_idx;
        u16                        decmprs_left;
        u16                        decmprs_wqe_counter;

        /* control */
        struct mlx5_core_dev      *mdev;
        struct mlx5_frag_wq_ctrl   wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_tx_wqe_info {
        u32 num_bytes;
        u8  num_wqebbs;
        u8  num_dma;
};

enum mlx5e_dma_map_type {
        MLX5E_DMA_MAP_SINGLE,
        MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
        dma_addr_t              addr;
        u32                     size;
        enum mlx5e_dma_map_type type;
};

enum {
        MLX5E_SQ_STATE_ENABLED,
};

struct mlx5e_sq_wqe_info {
        u8 opcode;
        u8 num_wqebbs;
};
struct mlx5e_txqsq {
        /* data path */

        /* dirtied @completion */
        u16                        cc;
        u32                        dma_fifo_cc;

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        u32                        dma_fifo_pc;
        struct mlx5e_sq_stats      stats;

        struct mlx5e_cq            cq;

        /* write@xmit, read@completion */
        struct {
                struct sk_buff           **skb;
                struct mlx5e_sq_dma       *dma_fifo;
                struct mlx5e_tx_wqe_info  *wqe_info;
        } db;

        /* read only */
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        void __iomem              *uar_map;
        struct netdev_queue       *txq;
        u32                        sqn;
        u16                        max_inline;
        u8                         min_inline_mode;
        u16                        edge;
        struct device             *pdev;
        struct mlx5e_tstamp       *tstamp;
        __be32                     mkey_be;
        unsigned long              state;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
        int                        txq_ix;
        u32                        rate_limit;
} ____cacheline_aligned_in_smp;

struct mlx5e_xdpsq {
        /* data path */

        /* dirtied @rx completion */
        u16                        cc;
        u16                        pc;

        struct mlx5e_cq            cq;

        /* write@xmit, read@completion */
        struct {
                struct mlx5e_dma_info *di;
                bool                   doorbell;
        } db;

        /* read only */
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
        struct device             *pdev;
        __be32                     mkey_be;
        u8                         min_inline_mode;
        unsigned long              state;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
        /* data path */

        /* dirtied @completion */
        u16                        cc;

        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        struct mlx5e_cq            cq;

        /* write@xmit, read@completion */
        struct {
                struct mlx5e_sq_wqe_info *ico_wqe;
        } db;

        /* read only */
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
        u16                        edge;
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;

        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
        struct mlx5e_channel      *channel;
} ____cacheline_aligned_in_smp;
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
        return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
}
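
/* Clarifying note (added): cc and pc are free-running 16-bit counters, so
 * (cc - pc) & sz_m1 is the number of ring slots between the producer and
 * the consumer. E.g. with sz_m1 == 1023, pc == 1020 and cc == 4, the free
 * room is (4 - 1020) & 1023 == 8, so posting n <= 8 WQEBBs is allowed;
 * cc == pc means the ring is completely empty.
 */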
struct mlx5e_dma_info {
        struct page *page;
        dma_addr_t   addr;
};

struct mlx5e_umr_dma_info {
        __be64                *mtt;
        dma_addr_t             mtt_addr;
        struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
        struct mlx5e_umr_wqe   wqe;
};

struct mlx5e_mpw_info {
        struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_rx_am_stats {
        int ppms; /* packets per msec */
        int epms; /* events per msec */
};

struct mlx5e_rx_am_sample {
        ktime_t      time;
        unsigned int pkt_ctr;
        u16          event_ctr;
};

struct mlx5e_rx_am { /* Adaptive Moderation */
        u8                        state;
        struct mlx5e_rx_am_stats  prev_stats;
        struct mlx5e_rx_am_sample start_sample;
        struct work_struct        work;
        u8                        profile_ix;
        u8                        mode;
        u8                        tune_state;
        u8                        steps_right;
        u8                        steps_left;
        u8                        tired;
};
/* a single cache unit is capable to serve one napi call (for non-striding rq)
 * or a MPWQE (for striding rq)
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                          MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
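
/* Illustrative arithmetic (added, assuming 4K pages): with 64 pages per
 * MPWQE and the usual NAPI_POLL_WEIGHT of 64, MLX5E_CACHE_UNIT is 64 and
 * MLX5E_CACHE_SIZE is 2 * roundup_pow_of_two(64) = 128 entries, i.e. the
 * cache can absorb two full refill bursts.
 */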
struct mlx5e_page_cache {
        u32 head;
        u32 tail;
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;

        union {
                struct mlx5e_dma_info *dma_info;
                struct {
                        struct mlx5e_mpw_info *info;
                        void                  *mtt_no_align;
                } mpwqe;
        };
        struct {
                u8  page_order;
                u32 wqe_sz;  /* wqe data buffer size */
                u8  map_dir; /* dma map direction */
        } buff;
        __be32                 mkey_be;

        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_tstamp   *tstamp;
        struct mlx5e_rq_stats  stats;
        struct mlx5e_cq        cq;
        struct mlx5e_page_cache page_cache;

        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe     alloc_wqe;
        mlx5e_fp_dealloc_wqe   dealloc_wqe;

        unsigned long          state;
        int                    ix;

        struct mlx5e_rx_am     am; /* Adaptive Moderation */

        /* XDP */
        struct bpf_prog       *xdp_prog;
        struct mlx5e_xdpsq     xdpsq;

        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
        u8                     wq_type;
        u32                    mpwqe_stride_sz;
        u32                    mpwqe_num_strides;
        u32                    rqn;
        struct mlx5e_channel  *channel;
        struct mlx5_core_dev  *mdev;
        struct mlx5_core_mkey  umr_mkey;
} ____cacheline_aligned_in_smp;
enum {
        MLX5E_CHANNEL_NAPI_SCHED = 1,
};

struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
        struct mlx5e_txqsq         sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_icosq         icosq; /* internal control operations */
        bool                       xdp;
        struct napi_struct         napi;
        struct device             *pdev;
        struct net_device         *netdev;
        __be32                     mkey_be;
        u8                         num_tc;
        unsigned long              flags;

        /* control */
        struct mlx5e_priv         *priv;
        struct mlx5_core_dev      *mdev;
        struct mlx5e_tstamp       *tstamp;
        int                        ix;
        int                        cpu;
};

struct mlx5e_channels {
        struct mlx5e_channel **c;
        unsigned int           num;
        struct mlx5e_params    params;
};
enum mlx5e_traffic_types {
        MLX5E_TT_IPV4_TCP,
        MLX5E_TT_IPV6_TCP,
        MLX5E_TT_IPV4_UDP,
        MLX5E_TT_IPV6_UDP,
        MLX5E_TT_IPV4_IPSEC_AH,
        MLX5E_TT_IPV6_IPSEC_AH,
        MLX5E_TT_IPV4_IPSEC_ESP,
        MLX5E_TT_IPV6_IPSEC_ESP,
        MLX5E_TT_IPV4,
        MLX5E_TT_IPV6,
        MLX5E_TT_ANY,
        MLX5E_NUM_TT,
        MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

enum {
        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
};
struct mlx5e_vxlan_db {
        spinlock_t             lock; /* protect vxlan table */
        struct radix_tree_root tree;
};

struct mlx5e_l2_rule {
        u8                       addr[ETH_ALEN + 2];
        struct mlx5_flow_handle *rule;
};

struct mlx5e_flow_table {
        int                      num_groups;
        struct mlx5_flow_table  *t;
        struct mlx5_flow_group **g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
struct mlx5e_tc_table {
        struct mlx5_flow_table   *t;

        struct rhashtable_params  ht_params;
        struct rhashtable         ht;
};

struct mlx5e_vlan_table {
        struct mlx5e_flow_table  ft;
        unsigned long            active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
        struct mlx5_flow_handle *untagged_rule;
        struct mlx5_flow_handle *any_cvlan_rule;
        struct mlx5_flow_handle *any_svlan_rule;
        bool                     filter_disabled;
};

struct mlx5e_l2_table {
        struct mlx5e_flow_table  ft;
        struct hlist_head        netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
        struct hlist_head        netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
        struct mlx5e_l2_rule     broadcast;
        struct mlx5e_l2_rule     allmulti;
        struct mlx5e_l2_rule     promisc;
        bool                     broadcast_enabled;
        bool                     allmulti_enabled;
        bool                     promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
        struct mlx5e_flow_table  ft;
        struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
};
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
        struct mlx5e_flow_table  ft;
        struct mlx5_flow_handle *default_rule;
        struct hlist_head        rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
        ARFS_IPV4_TCP,
        ARFS_IPV6_TCP,
        ARFS_IPV4_UDP,
        ARFS_IPV6_UDP,
        ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
        struct arfs_table arfs_tables[ARFS_NUM_TYPES];
        /* Protect aRFS rules list */
        spinlock_t               arfs_lock;
        struct list_head         rules;
        int                      last_filter_id;
        struct workqueue_struct *wq;
};
/* NIC prio FTS */
enum {
        MLX5E_VLAN_FT_LEVEL = 0,
        MLX5E_L2_FT_LEVEL,
        MLX5E_TTC_FT_LEVEL,
        MLX5E_ARFS_FT_LEVEL
};

struct mlx5e_ethtool_table {
        struct mlx5_flow_table *ft;
        int                     num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

struct mlx5e_ethtool_steering {
        struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
        struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
        struct list_head           rules;
        int                        tot_num_rules;
};

struct mlx5e_flow_steering {
        struct mlx5_flow_namespace    *ns;
        struct mlx5e_ethtool_steering  ethtool;
        struct mlx5e_tc_table          tc;
        struct mlx5e_vlan_table        vlan;
        struct mlx5e_l2_table          l2;
        struct mlx5e_ttc_table         ttc;
        struct mlx5e_arfs_tables       arfs;
};

struct mlx5e_rqt {
        u32  rqtn;
        bool enabled;
};

struct mlx5e_tir {
        u32              tirn;
        struct mlx5e_rqt rqt;
        struct list_head list;
};
struct mlx5e_priv {
        /* priv data path fields - start */
        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
        /* priv data path fields - end */

        unsigned long              state;
        struct mutex               state_lock; /* Protects Interface state */
        struct mlx5e_rq            drop_rq;

        struct mlx5e_channels      channels;
        u32                        tisn[MLX5E_MAX_NUM_TC];
        struct mlx5e_rqt           indir_rqt;
        struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
        u32                        tx_rates[MLX5E_MAX_NUM_SQS];

        struct mlx5e_flow_steering fs;
        struct mlx5e_vxlan_db      vxlan;

        struct workqueue_struct   *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct work_struct         tx_timeout_work;
        struct delayed_work        update_stats_work;

        struct mlx5_core_dev      *mdev;
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_tstamp        tstamp;
        u16                        q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx          dcbx;
#endif

        const struct mlx5e_profile *profile;
        void                       *ppriv;
};
struct mlx5e_profile {
        void    (*init)(struct mlx5_core_dev *mdev,
                        struct net_device *netdev,
                        const struct mlx5e_profile *profile, void *ppriv);
        void    (*cleanup)(struct mlx5e_priv *priv);
        int     (*init_rx)(struct mlx5e_priv *priv);
        void    (*cleanup_rx)(struct mlx5e_priv *priv);
        int     (*init_tx)(struct mlx5e_priv *priv);
        void    (*cleanup_tx)(struct mlx5e_priv *priv);
        void    (*enable)(struct mlx5e_priv *priv);
        void    (*disable)(struct mlx5e_priv *priv);
        void    (*update_stats)(struct mlx5e_priv *priv);
        int     (*max_nch)(struct mlx5_core_dev *mdev);
        int     max_tc;
};
void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

void mlx5e_rx_am(struct mlx5e_rq *rq);
void mlx5e_rx_am_work(struct work_struct *work);
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);

void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
                     u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
                           int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
                                struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
                               struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
                              int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);

void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
                             struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
        bool is_rss;
        union {
                u32 rqn; /* Direct RQN (Non-RSS) */
                struct {
                        u8 hfunc;
                        struct mlx5e_channels *channels;
                } rss; /* RSS data */
        };
};
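
/* Usage sketch (added for clarity; illustrative, not from the original
 * header). Redirecting an RQT to a single direct RQN:
 *
 *     struct mlx5e_redirect_rqt_param rrp = {
 *             .is_rss = false,
 *             .rqn    = rqn,
 *     };
 *
 *     mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
 *
 * For RSS, set .is_rss = true and fill .rss.hfunc / .rss.channels instead.
 */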
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
                       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
                                    enum mlx5e_traffic_types tt,
                                    void *tirc);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
                        struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
                                struct mlx5e_channels *new_chs,
                                mlx5e_fp_hw_modify hw_modify);
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
                                   u32 *indirection_rqt, int len,
                                   int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params, u8 rq_type);
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
        u16                       pi   = *pc & wq->sz_m1;
        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

        (*pc)++;

        return wqe;
}
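
/* Clarifying note (added): the descriptor index lives in bits 8..23 of
 * opmod_idx_opcode and the opcode in bits 0..7; qpn_ds carries the SQ
 * number shifted by 8 with a DS count of 1 (the lone ctrl segment).
 * Callers typically use this to pad the tail of the ring with NOPs when
 * the next real WQE would wrap, e.g. (illustrative):
 *
 *     while ((pi = (sq->pc & wq->sz_m1)) > sq->edge)
 *             mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 */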
static inline void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
                                   void __iomem *uar_map,
                                   struct mlx5_wqe_ctrl_seg *ctrl)
{
        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        *wq->db = cpu_to_be32(pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();

        mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
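
/* Usage sketch (added for clarity; illustrative, not from the original
 * header). A typical post-and-ring sequence:
 *
 *     struct mlx5e_tx_wqe *nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
 *
 *     mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
 *
 * The dma_wmb() orders the WQE writes against the doorbell-record update,
 * and the heavier wmb() orders the doorbell record against the MMIO write
 * that actually rings the doorbell.
 */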
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq;

        mcq = &cq->mcq;
        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
        return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
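
/* Illustrative arithmetic (added, assuming 4K pages): with 64 pages per
 * WQE (already 8-aligned), WQE 3 starts at MTT offset 3 * 64 = 192; the
 * alignment keeps every WQE's MTT array on the 8-entry (octword) boundary
 * that MLX5_MTT_OCTW() expects.
 */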
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
        return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
        return -EOPNOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
        return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                        u16 rxq_index, u32 flow_id);
#endif
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
                     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

struct mlx5_eswitch_rep;
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
                         struct mlx5_eswitch_rep *rep);
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
                            struct mlx5_eswitch_rep *rep);
int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
                          struct mlx5_eswitch_rep *rep);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
/* common netdev helpers */
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
                                       const struct mlx5e_profile *profile,
                                       void *ppriv);
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp);
bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);

#endif /* __MLX5_EN_H__ */