]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlx5/core/en.h
net/mlx5e: Add TX port timestamp support
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / en.h
CommitLineData
f62b8bb8 1/*
1afff42c 2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
f62b8bb8
AV
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
1afff42c
MF
32#ifndef __MLX5_EN_H__
33#define __MLX5_EN_H__
f62b8bb8
AV
34
35#include <linux/if_vlan.h>
36#include <linux/etherdevice.h>
ef9814de
EBE
37#include <linux/timecounter.h>
38#include <linux/net_tstamp.h>
48935bbb 39#include <linux/crash_dump.h>
f62b8bb8
AV
40#include <linux/mlx5/driver.h>
41#include <linux/mlx5/qp.h>
42#include <linux/mlx5/cq.h>
ada68c31 43#include <linux/mlx5/port.h>
d18a9470 44#include <linux/mlx5/vport.h>
8d7f9ecb 45#include <linux/mlx5/transobj.h>
1ae1df3a 46#include <linux/mlx5/fs.h>
e8f887ac 47#include <linux/rhashtable.h>
18a2b7f9 48#include <net/udp_tunnel.h>
cb67b832 49#include <net/switchdev.h>
0ddf5432 50#include <net/xdp.h>
4f75da36 51#include <linux/dim.h>
8ff57c18 52#include <linux/bits.h>
f62b8bb8 53#include "wq.h"
f62b8bb8 54#include "mlx5_core.h"
9218b44d 55#include "en_stats.h"
3f3ab178 56#include "en/dcbnl.h"
fe6d86b3 57#include "en/fs.h"
cef35af3 58#include "lib/hv_vhca.h"
f62b8bb8 59
4d8fcf21 60extern const struct net_device_ops mlx5e_netdev_ops;
60bbf7ee
JDB
61struct page_pool;
62
bb909416
IL
63#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
64#define MLX5E_METADATA_ETHER_LEN 8
65
1cabe6b0
MG
66#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
67
c139dbfd
ES
68#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
69
472a1e44
TT
70#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
71#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
d8bec2b2 72
f62b8bb8
AV
73#define MLX5E_MAX_NUM_TC 8
74
1bfecfca 75#define MLX5_RX_HEADROOM NET_SKB_PAD
78aedd32
TT
76#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
77 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
1bfecfca 78
94816278
TT
79#define MLX5E_RX_MAX_HEAD (256)
80
f32f5bd2
DJ
81#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
82 (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
83#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
84 max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
94816278
TT
85#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
86 MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
f32f5bd2 87
7e426671 88#define MLX5_MPWRQ_LOG_WQE_SZ 18
461017cb
TT
89#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
90 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
91#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
fe4c988b
SM
92
93#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
c3c94023
AL
94/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
95 * WQEs, This page will absorb write overflow by the hardware, when
96 * receiving packets larger than MTU. These oversize packets are
97 * dropped by the driver at a later stage.
98 */
99#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
b8a98a4c 100#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
73281b78
TT
101#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
102#define MLX5E_MAX_RQ_NUM_MTTS \
103 ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
104#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
105#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
106 (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
107#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
108 (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
109 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
110
069d1146
TT
111#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
112#define MLX5E_LOG_MAX_RX_WQE_BULK \
113 (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
114
73281b78
TT
115#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
116#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
117#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
118
069d1146 119#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
73281b78
TT
120#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
121#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
122 MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
123
124#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
fe4c988b 125
d9a40271 126#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
2b029556
SM
127#define MLX5E_DEFAULT_LRO_TIMEOUT 32
128#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
129
f62b8bb8 130#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
9908aa29 131#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
f62b8bb8
AV
132#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
133#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
0088cbbc 134#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
f62b8bb8
AV
135#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
136#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
461017cb 137#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
f62b8bb8 138
936896e9
AS
139#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
140#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
b4e029da 141#define MLX5E_MIN_NUM_CHANNELS 0x1
57c7fce1 142#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
507f0c81 143#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
f62b8bb8 144#define MLX5E_TX_CQ_POLL_BUDGET 128
db05815b 145#define MLX5E_TX_XSK_POLL_BUDGET 64
db75373c 146#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
f62b8bb8 147
ea3886ca
TT
148#define MLX5E_UMR_WQE_INLINE_SZ \
149 (sizeof(struct mlx5e_umr_wqe) + \
150 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
151 MLX5_UMR_MTT_ALIGNMENT))
152#define MLX5E_UMR_WQEBBS \
153 (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
2f48af12 154
79c48764
GP
155#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
156
157#define mlx5e_dbg(mlevel, priv, format, ...) \
158do { \
159 if (NETIF_MSG_##mlevel & (priv)->msglevel) \
160 netdev_warn(priv->netdev, format, \
161 ##__VA_ARGS__); \
162} while (0)
163
db05815b
MM
164enum mlx5e_rq_group {
165 MLX5E_RQ_GROUP_REGULAR,
166 MLX5E_RQ_GROUP_XSK,
694826e3 167#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
db05815b 168};
79c48764 169
45f171b1
MM
170static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
171{
172 if (mlx5_lag_is_lacp_owner(mdev))
173 return 1;
174
175 return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
176}
177
461017cb
TT
178static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
179{
180 switch (wq_type) {
181 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
182 return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
183 wq_size / 2);
184 default:
185 return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
186 wq_size / 2);
187 }
188}
189
779d986d 190/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
48935bbb
SM
191static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
192{
193 return is_kdump_kernel() ?
194 MLX5E_MIN_NUM_CHANNELS :
f2f3df55 195 min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
48935bbb
SM
196}
197
2f48af12
TT
198struct mlx5e_tx_wqe {
199 struct mlx5_wqe_ctrl_seg ctrl;
7d0d0d86
TT
200 struct mlx5_wqe_eth_seg eth;
201 struct mlx5_wqe_data_seg data[0];
2f48af12
TT
202};
203
99cbfa93 204struct mlx5e_rx_wqe_ll {
2f48af12 205 struct mlx5_wqe_srq_next_seg next;
339ffae5 206 struct mlx5_wqe_data_seg data[];
99cbfa93
TT
207};
208
209struct mlx5e_rx_wqe_cyc {
210 struct mlx5_wqe_data_seg data[0];
2f48af12 211};
86d722ad 212
bc77b240
TT
213struct mlx5e_umr_wqe {
214 struct mlx5_wqe_ctrl_seg ctrl;
215 struct mlx5_wqe_umr_ctrl_seg uctrl;
216 struct mlx5_mkey_seg mkc;
7d0d0d86 217 struct mlx5_mtt inline_mtts[0];
bc77b240
TT
218};
219
d605d668
KH
220extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
221
4e59e288 222enum mlx5e_priv_flag {
8ff57c18
TT
223 MLX5E_PFLAG_RX_CQE_BASED_MODER,
224 MLX5E_PFLAG_TX_CQE_BASED_MODER,
225 MLX5E_PFLAG_RX_CQE_COMPRESS,
226 MLX5E_PFLAG_RX_STRIDING_RQ,
227 MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
6277053a 228 MLX5E_PFLAG_XDP_TX_MPWQE,
5af75c74 229 MLX5E_PFLAG_SKB_TX_MPWQE,
145e5637 230 MLX5E_PFLAG_TX_PORT_TS,
8ff57c18 231 MLX5E_NUM_PFLAGS, /* Keep last */
4e59e288
GP
232};
233
6a9764ef 234#define MLX5E_SET_PFLAG(params, pflag, enable) \
59ece1c9
SD
235 do { \
236 if (enable) \
8ff57c18 237 (params)->pflags |= BIT(pflag); \
59ece1c9 238 else \
8ff57c18 239 (params)->pflags &= ~(BIT(pflag)); \
4e59e288
GP
240 } while (0)
241
8ff57c18 242#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
59ece1c9 243
f62b8bb8
AV
244struct mlx5e_params {
245 u8 log_sq_size;
461017cb 246 u8 rq_wq_type;
73281b78 247 u8 log_rq_mtu_frames;
f62b8bb8 248 u16 num_channels;
f62b8bb8 249 u8 num_tc;
9bcc8606 250 bool rx_cqe_compress_def;
69dad68d 251 bool tunneled_offload_en;
8960b389
TG
252 struct dim_cq_moder rx_cq_moderation;
253 struct dim_cq_moder tx_cq_moderation;
f62b8bb8 254 bool lro_en;
cff92d7c 255 u8 tx_min_inline_mode;
36350114 256 bool vlan_strip_disable;
102722fc 257 bool scatter_fcs_en;
9a317425 258 bool rx_dim_enabled;
cbce4f44 259 bool tx_dim_enabled;
2b029556 260 u32 lro_timeout;
59ece1c9 261 u32 pflags;
6a9764ef 262 struct bpf_prog *xdp_prog;
db05815b 263 struct mlx5e_xsk *xsk;
472a1e44
TT
264 unsigned int sw_mtu;
265 int hard_mtu;
f62b8bb8
AV
266};
267
268enum {
c0f1147d 269 MLX5E_RQ_STATE_ENABLED,
8276ea13 270 MLX5E_RQ_STATE_RECOVERING,
cb3c7fd4 271 MLX5E_RQ_STATE_AM,
b856df28 272 MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
db849faa 273 MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
a2907436 274 MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
b7cf0806 275 MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
f62b8bb8
AV
276};
277
f62b8bb8
AV
278struct mlx5e_cq {
279 /* data path - accessed per cqe */
280 struct mlx5_cqwq wq;
f62b8bb8
AV
281
282 /* data path - accessed per napi poll */
cb3c7fd4 283 u16 event_ctr;
f62b8bb8
AV
284 struct napi_struct *napi;
285 struct mlx5_core_cq mcq;
4d0b7ef9 286 struct mlx5e_ch_stats *ch_stats;
f62b8bb8 287
79d356ef 288 /* control */
4d0b7ef9 289 struct net_device *netdev;
79d356ef 290 struct mlx5_core_dev *mdev;
4d0b7ef9 291 struct mlx5e_priv *priv;
79d356ef
TT
292 struct mlx5_wq_ctrl wq_ctrl;
293} ____cacheline_aligned_in_smp;
294
295struct mlx5e_cq_decomp {
7219ab34
TT
296 /* cqe decompression */
297 struct mlx5_cqe64 title;
298 struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
299 u8 mini_arr_idx;
79d356ef
TT
300 u16 left;
301 u16 wqe_counter;
f62b8bb8
AV
302} ____cacheline_aligned_in_smp;
303
eba2db2b
SM
304enum mlx5e_dma_map_type {
305 MLX5E_DMA_MAP_SINGLE,
306 MLX5E_DMA_MAP_PAGE
307};
308
309struct mlx5e_sq_dma {
310 dma_addr_t addr;
311 u32 size;
312 enum mlx5e_dma_map_type type;
313};
314
315enum {
316 MLX5E_SQ_STATE_ENABLED,
5af75c74 317 MLX5E_SQ_STATE_MPWQE,
db75373c 318 MLX5E_SQ_STATE_RECOVERING,
2ac9cfe7 319 MLX5E_SQ_STATE_IPSEC,
cbce4f44 320 MLX5E_SQ_STATE_AM,
bf239741 321 MLX5E_SQ_STATE_TLS,
b431302e 322 MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
e7e0004a 323 MLX5E_SQ_STATE_PENDING_XSK_TX,
eba2db2b
SM
324};
325
b39fe61e
MM
326struct mlx5e_tx_mpwqe {
327 /* Current MPWQE session */
328 struct mlx5e_tx_wqe *wqe;
5af75c74 329 u32 bytes_count;
b39fe61e
MM
330 u8 ds_count;
331 u8 pkt_count;
332 u8 inline_on;
333};
334
0b676aae
EBE
335struct mlx5e_skb_fifo {
336 struct sk_buff **fifo;
337 u16 *pc;
338 u16 *cc;
339 u16 mask;
340};
341
145e5637
EBE
342struct mlx5e_ptpsq;
343
31391048 344struct mlx5e_txqsq {
eba2db2b
SM
345 /* data path */
346
347 /* dirtied @completion */
348 u16 cc;
338c46c6 349 u16 skb_fifo_cc;
eba2db2b 350 u32 dma_fifo_cc;
8960b389 351 struct dim dim; /* Adaptive Moderation */
eba2db2b
SM
352
353 /* dirtied @xmit */
354 u16 pc ____cacheline_aligned_in_smp;
338c46c6 355 u16 skb_fifo_pc;
eba2db2b 356 u32 dma_fifo_pc;
5af75c74 357 struct mlx5e_tx_mpwqe mpwqe;
eba2db2b
SM
358
359 struct mlx5e_cq cq;
360
eba2db2b
SM
361 /* read only */
362 struct mlx5_wq_cyc wq;
363 u32 dma_fifo_mask;
05909bab 364 struct mlx5e_sq_stats *stats;
9a3956da
TT
365 struct {
366 struct mlx5e_sq_dma *dma_fifo;
0b676aae 367 struct mlx5e_skb_fifo skb_fifo;
9a3956da
TT
368 struct mlx5e_tx_wqe_info *wqe_info;
369 } db;
eba2db2b
SM
370 void __iomem *uar_map;
371 struct netdev_queue *txq;
372 u32 sqn;
01614d4f 373 u16 stop_room;
eba2db2b 374 u8 min_inline_mode;
eba2db2b 375 struct device *pdev;
eba2db2b
SM
376 __be32 mkey_be;
377 unsigned long state;
84d1bb2b 378 unsigned int hw_mtu;
7c39afb3
FD
379 struct hwtstamp_config *tstamp;
380 struct mlx5_clock *clock;
4ad40d8e
EBE
381 struct net_device *netdev;
382 struct mlx5_core_dev *mdev;
383 struct mlx5e_priv *priv;
eba2db2b
SM
384
385 /* control path */
386 struct mlx5_wq_ctrl wq_ctrl;
57c70d87 387 int ch_ix;
acc6c595 388 int txq_ix;
eba2db2b 389 u32 rate_limit;
de8650a8 390 struct work_struct recover_work;
145e5637 391 struct mlx5e_ptpsq *ptpsq;
31391048
SM
392} ____cacheline_aligned_in_smp;
393
c94e4f11 394struct mlx5e_dma_info {
db05815b
MM
395 dma_addr_t addr;
396 union {
397 struct page *page;
39d6443c 398 struct xdp_buff *xsk;
db05815b 399 };
c94e4f11
TT
400};
401
d963fa15
MM
402/* XDP packets can be transmitted in different ways. On completion, we need to
403 * distinguish between them to clean up things in a proper way.
404 */
405enum mlx5e_xdp_xmit_mode {
406 /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
407 * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
408 * returned.
409 */
410 MLX5E_XDP_XMIT_MODE_FRAME,
411
412 /* The xdp_frame was created in place as a result of XDP_TX from a
413 * regular RQ. No DMA remapping happened, and the page belongs to us.
414 */
415 MLX5E_XDP_XMIT_MODE_PAGE,
416
417 /* No xdp_frame was created at all, the transmit happened from a UMEM
418 * page. The UMEM Completion Ring producer pointer has to be increased.
419 */
420 MLX5E_XDP_XMIT_MODE_XSK,
c94e4f11
TT
421};
422
423struct mlx5e_xdp_info {
d963fa15
MM
424 enum mlx5e_xdp_xmit_mode mode;
425 union {
426 struct {
427 struct xdp_frame *xdpf;
428 dma_addr_t dma_addr;
429 } frame;
430 struct {
b9673cf5 431 struct mlx5e_rq *rq;
d963fa15
MM
432 struct mlx5e_dma_info di;
433 } page;
434 };
435};
436
b39fe61e 437struct mlx5e_xmit_data {
d963fa15
MM
438 dma_addr_t dma_addr;
439 void *data;
440 u32 len;
c94e4f11
TT
441};
442
fea28dd6
TT
443struct mlx5e_xdp_info_fifo {
444 struct mlx5e_xdp_info *xi;
445 u32 *cc;
446 u32 *pc;
447 u32 mask;
448};
449
5e0d2eef 450struct mlx5e_xdpsq;
db05815b 451typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
d963fa15 452typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
b39fe61e 453 struct mlx5e_xmit_data *,
db05815b
MM
454 struct mlx5e_xdp_info *,
455 int);
d963fa15 456
31391048
SM
457struct mlx5e_xdpsq {
458 /* data path */
459
dac0d15f 460 /* dirtied @completion */
fea28dd6 461 u32 xdpi_fifo_cc;
31391048 462 u16 cc;
31391048 463
dac0d15f 464 /* dirtied @xmit */
fea28dd6
TT
465 u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
466 u16 pc;
b8180392 467 struct mlx5_wqe_ctrl_seg *doorbell_cseg;
b39fe61e 468 struct mlx5e_tx_mpwqe mpwqe;
31391048 469
dac0d15f 470 struct mlx5e_cq cq;
31391048
SM
471
472 /* read only */
1742b3d5 473 struct xsk_buff_pool *xsk_pool;
31391048 474 struct mlx5_wq_cyc wq;
890388ad 475 struct mlx5e_xdpsq_stats *stats;
db05815b 476 mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
5e0d2eef 477 mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
dac0d15f 478 struct {
1feeab80 479 struct mlx5e_xdp_wqe_info *wqe_info;
fea28dd6 480 struct mlx5e_xdp_info_fifo xdpi_fifo;
dac0d15f 481 } db;
31391048
SM
482 void __iomem *uar_map;
483 u32 sqn;
484 struct device *pdev;
485 __be32 mkey_be;
486 u8 min_inline_mode;
487 unsigned long state;
c94e4f11 488 unsigned int hw_mtu;
31391048
SM
489
490 /* control path */
491 struct mlx5_wq_ctrl wq_ctrl;
492 struct mlx5e_channel *channel;
493} ____cacheline_aligned_in_smp;
494
495struct mlx5e_icosq {
496 /* data path */
fd9b4be8
TT
497 u16 cc;
498 u16 pc;
31391048 499
fd9b4be8 500 struct mlx5_wqe_ctrl_seg *doorbell_cseg;
31391048
SM
501 struct mlx5e_cq cq;
502
503 /* write@xmit, read@completion */
504 struct {
7d42c8e9 505 struct mlx5e_icosq_wqe_info *wqe_info;
31391048
SM
506 } db;
507
508 /* read only */
509 struct mlx5_wq_cyc wq;
510 void __iomem *uar_map;
511 u32 sqn;
31391048
SM
512 unsigned long state;
513
514 /* control path */
515 struct mlx5_wq_ctrl wq_ctrl;
516 struct mlx5e_channel *channel;
be5323c8
AL
517
518 struct work_struct recover_work;
eba2db2b
SM
519} ____cacheline_aligned_in_smp;
520
accd5883 521struct mlx5e_wqe_frag_info {
069d1146 522 struct mlx5e_dma_info *di;
accd5883 523 u32 offset;
069d1146 524 bool last_in_page;
accd5883
TT
525};
526
eba2db2b 527struct mlx5e_umr_dma_info {
eba2db2b 528 struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
eba2db2b
SM
529};
530
531struct mlx5e_mpw_info {
532 struct mlx5e_umr_dma_info umr;
533 u16 consumed_strides;
22f45398 534 DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
eba2db2b
SM
535};
536
069d1146
TT
537#define MLX5E_MAX_RX_FRAGS 4
538
4415a031
TT
539/* a single cache unit is capable to serve one napi call (for non-striding rq)
540 * or a MPWQE (for striding rq).
541 */
542#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
543 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
29c2849e 544#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
4415a031
TT
545struct mlx5e_page_cache {
546 u32 head;
547 u32 tail;
548 struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
549};
550
eba2db2b
SM
551struct mlx5e_rq;
552typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
619a8f2a
TT
553typedef struct sk_buff *
554(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
555 u16 cqe_bcnt, u32 head_offset, u32 page_idx);
069d1146
TT
556typedef struct sk_buff *
557(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
558 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
7cc6d77b 559typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
eba2db2b
SM
560typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
561
5adf4c47
TT
562int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
563
121e8927 564enum mlx5e_rq_flag {
f03590f7 565 MLX5E_RQ_FLAG_XDP_XMIT,
15143bf5 566 MLX5E_RQ_FLAG_XDP_REDIRECT,
121e8927
TT
567};
568
069d1146
TT
569struct mlx5e_rq_frag_info {
570 int frag_size;
571 int frag_stride;
572};
573
574struct mlx5e_rq_frags_info {
575 struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
576 u8 num_frags;
577 u8 log_num_frags;
578 u8 wqe_bulk;
579};
580
f62b8bb8
AV
581struct mlx5e_rq {
582 /* data path */
21c59685 583 union {
accd5883 584 struct {
069d1146
TT
585 struct mlx5_wq_cyc wq;
586 struct mlx5e_wqe_frag_info *frags;
587 struct mlx5e_dma_info *di;
588 struct mlx5e_rq_frags_info info;
589 mlx5e_fp_skb_from_cqe skb_from_cqe;
accd5883 590 } wqe;
21c59685 591 struct {
422d4c40 592 struct mlx5_wq_ll wq;
b8a98a4c 593 struct mlx5e_umr_wqe umr_wqe;
21c59685 594 struct mlx5e_mpw_info *info;
619a8f2a 595 mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
b45d8b50 596 u16 num_strides;
fd9b4be8 597 u16 actual_wq_head;
89e89f7a 598 u8 log_stride_sz;
fd9b4be8
TT
599 u8 umr_in_progress;
600 u8 umr_last_bulk;
ed084fb6 601 u8 umr_completed;
21c59685
SM
602 } mpwqe;
603 };
1bfecfca 604 struct {
b45d8b50 605 u16 headroom;
d628ee4f 606 u32 frame0_sz;
b5503b99 607 u8 map_dir; /* dma map direction */
1bfecfca 608 } buff;
f62b8bb8
AV
609
610 struct device *pdev;
611 struct net_device *netdev;
05909bab 612 struct mlx5e_rq_stats *stats;
f62b8bb8 613 struct mlx5e_cq cq;
79d356ef 614 struct mlx5e_cq_decomp cqd;
4415a031 615 struct mlx5e_page_cache page_cache;
7c39afb3
FD
616 struct hwtstamp_config *tstamp;
617 struct mlx5_clock *clock;
521f31af
AL
618 struct mlx5e_icosq *icosq;
619 struct mlx5e_priv *priv;
4415a031 620
2f48af12 621 mlx5e_fp_handle_rx_cqe handle_rx_cqe;
7cc6d77b 622 mlx5e_fp_post_rx_wqes post_wqes;
6cd392a0 623 mlx5e_fp_dealloc_wqe dealloc_wqe;
f62b8bb8
AV
624
625 unsigned long state;
626 int ix;
0073c8f7 627 unsigned int hw_mtu;
f62b8bb8 628
8960b389 629 struct dim dim; /* Dynamic Interrupt Moderation */
31871f87
SM
630
631 /* XDP */
fe45386a 632 struct bpf_prog __rcu *xdp_prog;
b9673cf5 633 struct mlx5e_xdpsq *xdpsq;
121e8927 634 DECLARE_BITMAP(flags, 8);
60bbf7ee 635 struct page_pool *page_pool;
cb3c7fd4 636
db05815b 637 /* AF_XDP zero-copy */
1742b3d5 638 struct xsk_buff_pool *xsk_pool;
db05815b 639
8276ea13
AL
640 struct work_struct recover_work;
641
f62b8bb8
AV
642 /* control */
643 struct mlx5_wq_ctrl wq_ctrl;
b45d8b50 644 __be32 mkey_be;
461017cb 645 u8 wq_type;
f62b8bb8 646 u32 rqn;
a43b25da 647 struct mlx5_core_dev *mdev;
ec8b9981 648 struct mlx5_core_mkey umr_mkey;
c3c94023 649 struct mlx5e_dma_info wqe_overflow;
0ddf5432
JDB
650
651 /* XDP read-mostly */
652 struct xdp_rxq_info xdp_rxq;
f62b8bb8
AV
653} ____cacheline_aligned_in_smp;
654
db05815b
MM
655enum mlx5e_channel_state {
656 MLX5E_CHANNEL_STATE_XSK,
657 MLX5E_CHANNEL_NUM_STATES
658};
659
f62b8bb8
AV
660struct mlx5e_channel {
661 /* data path */
662 struct mlx5e_rq rq;
b9673cf5 663 struct mlx5e_xdpsq rq_xdpsq;
31391048
SM
664 struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
665 struct mlx5e_icosq icosq; /* internal control operations */
b5503b99 666 bool xdp;
f62b8bb8
AV
667 struct napi_struct napi;
668 struct device *pdev;
669 struct net_device *netdev;
670 __be32 mkey_be;
671 u8 num_tc;
45f171b1 672 u8 lag_port;
f62b8bb8 673
58b99ee3
TT
674 /* XDP_REDIRECT */
675 struct mlx5e_xdpsq xdpsq;
676
db05815b
MM
677 /* AF_XDP zero-copy */
678 struct mlx5e_rq xskrq;
679 struct mlx5e_xdpsq xsksq;
8d94b590
TT
680
681 /* Async ICOSQ */
682 struct mlx5e_icosq async_icosq;
683 /* async_icosq can be accessed from any CPU - the spinlock protects it. */
684 spinlock_t async_icosq_lock;
db05815b 685
a8c2eb15
TT
686 /* data path - accessed per napi poll */
687 struct irq_desc *irq_desc;
05909bab 688 struct mlx5e_ch_stats *stats;
f62b8bb8
AV
689
690 /* control */
691 struct mlx5e_priv *priv;
a43b25da 692 struct mlx5_core_dev *mdev;
7c39afb3 693 struct hwtstamp_config *tstamp;
db05815b 694 DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
f62b8bb8 695 int ix;
231243c8 696 int cpu;
f62b8bb8
AV
697};
698
145e5637
EBE
699struct mlx5e_port_ptp;
700
ff9c852f
SM
701struct mlx5e_channels {
702 struct mlx5e_channel **c;
145e5637 703 struct mlx5e_port_ptp *port_ptp;
ff9c852f 704 unsigned int num;
6a9764ef 705 struct mlx5e_params params;
ff9c852f
SM
706};
707
05909bab
EBE
708struct mlx5e_channel_stats {
709 struct mlx5e_ch_stats ch;
710 struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
711 struct mlx5e_rq_stats rq;
db05815b 712 struct mlx5e_rq_stats xskrq;
890388ad 713 struct mlx5e_xdpsq_stats rq_xdpsq;
58b99ee3 714 struct mlx5e_xdpsq_stats xdpsq;
db05815b 715 struct mlx5e_xdpsq_stats xsksq;
05909bab
EBE
716} ____cacheline_aligned_in_smp;
717
145e5637
EBE
718struct mlx5e_port_ptp_stats {
719 struct mlx5e_ch_stats ch;
720 struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
1880bc4e 721 struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
145e5637
EBE
722} ____cacheline_aligned_in_smp;
723
acff797c 724enum {
acff797c
MG
725 MLX5E_STATE_OPENED,
726 MLX5E_STATE_DESTROYING,
407e17b1 727 MLX5E_STATE_XDP_TX_ENABLED,
9cf88808 728 MLX5E_STATE_XDP_ACTIVE,
acff797c
MG
729};
730
398f3351 731struct mlx5e_rqt {
1da36696 732 u32 rqtn;
398f3351
HHZ
733 bool enabled;
734};
735
736struct mlx5e_tir {
737 u32 tirn;
738 struct mlx5e_rqt rqt;
739 struct list_head list;
1da36696
TT
740};
741
acff797c
MG
742enum {
743 MLX5E_TC_PRIO = 0,
744 MLX5E_NIC_PRIO
745};
746
bbeb53b8
AL
747struct mlx5e_rss_params {
748 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
756c4160 749 u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
bbeb53b8
AL
750 u8 toeplitz_hash_key[40];
751 u8 hfunc;
752};
753
de8650a8
EBE
754struct mlx5e_modify_sq_param {
755 int curr_state;
756 int next_state;
757 int rl_update;
758 int rl_index;
759};
760
cef35af3
EBE
761#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
762struct mlx5e_hv_vhca_stats_agent {
763 struct mlx5_hv_vhca_agent *agent;
764 struct delayed_work work;
765 u16 delay;
766 void *buf;
767};
768#endif
769
db05815b 770struct mlx5e_xsk {
1742b3d5
MK
771 /* XSK buffer pools are stored separately from channels,
772 * because we don't want to lose them when channels are
773 * recreated. The kernel also stores buffer pool, but it doesn't
774 * distinguish between zero-copy and non-zero-copy UMEMs, so
775 * rely on our mechanism.
db05815b 776 */
1742b3d5 777 struct xsk_buff_pool **pools;
db05815b
MM
778 u16 refcnt;
779 bool ever_used;
780};
781
3909a12e
MM
782/* Temporary storage for variables that are allocated when struct mlx5e_priv is
783 * initialized, and used where we can't allocate them because that functions
784 * must not fail. Use with care and make sure the same variable is not used
785 * simultaneously by multiple users.
786 */
787struct mlx5e_scratchpad {
788 cpumask_var_t cpumask;
789};
790
f62b8bb8
AV
791struct mlx5e_priv {
792 /* priv data path fields - start */
145e5637
EBE
793 /* +1 for port ptp ts */
794 struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
c55d8b10 795 int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
145e5637 796 int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
2a5e7a13
HN
797#ifdef CONFIG_MLX5_CORE_EN_DCB
798 struct mlx5e_dcbx_dp dcbx_dp;
799#endif
f62b8bb8
AV
800 /* priv data path fields - end */
801
79c48764 802 u32 msglevel;
f62b8bb8
AV
803 unsigned long state;
804 struct mutex state_lock; /* Protects Interface state */
50cfa25a 805 struct mlx5e_rq drop_rq;
f62b8bb8 806
ff9c852f 807 struct mlx5e_channels channels;
45f171b1 808 u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
398f3351 809 struct mlx5e_rqt indir_rqt;
724b2aa1 810 struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
7b3722fa 811 struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
724b2aa1 812 struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
db05815b 813 struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
bbeb53b8 814 struct mlx5e_rss_params rss_params;
507f0c81 815 u32 tx_rates[MLX5E_MAX_NUM_SQS];
f62b8bb8 816
acff797c 817 struct mlx5e_flow_steering fs;
f62b8bb8 818
7bb29755 819 struct workqueue_struct *wq;
f62b8bb8
AV
820 struct work_struct update_carrier_work;
821 struct work_struct set_rx_mode_work;
3947ca18 822 struct work_struct tx_timeout_work;
cdeef2b1 823 struct work_struct update_stats_work;
5c7e8bbb
ED
824 struct work_struct monitor_counters_work;
825 struct mlx5_nb monitor_counters_nb;
f62b8bb8
AV
826
827 struct mlx5_core_dev *mdev;
828 struct net_device *netdev;
829 struct mlx5e_stats stats;
05909bab 830 struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
145e5637 831 struct mlx5e_port_ptp_stats port_ptp_stats;
694826e3 832 u16 max_nch;
05909bab 833 u8 max_opened_tc;
145e5637 834 bool port_ptp_opened;
7c39afb3 835 struct hwtstamp_config tstamp;
7cbaf9a3
MS
836 u16 q_counter;
837 u16 drop_rq_q_counter;
7cffaddd 838 struct notifier_block events_nb;
145e5637 839 int num_tc_x_num_ch;
7cffaddd 840
18a2b7f9 841 struct udp_tunnel_nic_info nic_info;
3a6a931d
HN
842#ifdef CONFIG_MLX5_CORE_EN_DCB
843 struct mlx5e_dcbx dcbx;
844#endif
845
6bfd390b 846 const struct mlx5e_profile *profile;
127ea380 847 void *ppriv;
547eede0
IT
848#ifdef CONFIG_MLX5_EN_IPSEC
849 struct mlx5e_ipsec *ipsec;
850#endif
43585a41
IL
851#ifdef CONFIG_MLX5_EN_TLS
852 struct mlx5e_tls *tls;
853#endif
de8650a8 854 struct devlink_health_reporter *tx_reporter;
9032e719 855 struct devlink_health_reporter *rx_reporter;
162add8c 856 struct devlink_port dl_port;
db05815b 857 struct mlx5e_xsk xsk;
cef35af3
EBE
858#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
859 struct mlx5e_hv_vhca_stats_agent stats_agent;
860#endif
3909a12e 861 struct mlx5e_scratchpad scratchpad;
f62b8bb8
AV
862};
863
5adf4c47
TT
864struct mlx5e_rx_handlers {
865 mlx5e_fp_handle_rx_cqe handle_rx_cqe;
866 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
867};
868
869extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
870
a43b25da 871struct mlx5e_profile {
182570b2 872 int (*init)(struct mlx5_core_dev *mdev,
a43b25da
SM
873 struct net_device *netdev,
874 const struct mlx5e_profile *profile, void *ppriv);
875 void (*cleanup)(struct mlx5e_priv *priv);
876 int (*init_rx)(struct mlx5e_priv *priv);
877 void (*cleanup_rx)(struct mlx5e_priv *priv);
878 int (*init_tx)(struct mlx5e_priv *priv);
879 void (*cleanup_tx)(struct mlx5e_priv *priv);
880 void (*enable)(struct mlx5e_priv *priv);
881 void (*disable)(struct mlx5e_priv *priv);
a90f88fe 882 int (*update_rx)(struct mlx5e_priv *priv);
a43b25da 883 void (*update_stats)(struct mlx5e_priv *priv);
7ca42c80 884 void (*update_carrier)(struct mlx5e_priv *priv);
3460c184 885 unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
f0ff8e8c 886 mlx5e_stats_grp_t *stats_grps;
5adf4c47 887 const struct mlx5e_rx_handlers *rx_handlers;
a43b25da 888 int max_tc;
694826e3 889 u8 rq_groups;
a43b25da
SM
890};
891
665bc539
GP
892void mlx5e_build_ptys2ethtool_map(void);
893
2ccb0a79
TT
894bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
895bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
896 struct mlx5e_params *params);
897
d9ee0491 898void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
b832d4fd 899void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
f62b8bb8 900
33cfaaa8 901void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
d605d668
KH
902int mlx5e_self_test_num(struct mlx5e_priv *priv);
903void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
904 u64 *buf);
f62b8bb8
AV
905void mlx5e_set_rx_mode_work(struct work_struct *work);
906
1170fbd8
FD
907int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
908int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
be7e87f9 909int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
ef9814de 910
f62b8bb8
AV
911int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
912 u16 vid);
913int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
914 u16 vid);
237f258c 915void mlx5e_timestamp_init(struct mlx5e_priv *priv);
f62b8bb8 916
a5f97fee
SM
917struct mlx5e_redirect_rqt_param {
918 bool is_rss;
919 union {
920 u32 rqn; /* Direct RQN (Non-RSS) */
921 struct {
922 u8 hfunc;
923 struct mlx5e_channels *channels;
924 } rss; /* RSS data */
925 };
926};
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
/* Build the TIR hash context; 'inner' selects inner-header hashing. */
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner);
void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);

struct mlx5e_xsk_param;

/* RQ lifecycle. xsk/xsk_pool may relate to AF_XDP zero-copy - see callers. */
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);

/* ICOSQ (internal control operations SQ) and XDP SQ lifecycle. */
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
		     struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
954
/* Creation-time parameters for a CQ (see mlx5e_open_cq()). */
struct mlx5e_create_cq_param {
	struct napi_struct *napi;      /* NAPI context the CQ is serviced from */
	struct mlx5e_ch_stats *ch_stats;
	int node;                      /* NUMA node for allocations - presumably; confirm at call sites */
	int ix;                        /* channel index */
};
/* CQ lifecycle. */
struct mlx5e_cq_param;
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
		  struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);

/* _locked variants: caller already holds the priv state lock. */
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

/* Open/close the full set of channels described by 'chs'. */
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW or kernel settings while
 * switching channels
 */
typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
/* Generate fn##_ctx(), adapting a context-less handler fn(priv) to the
 * mlx5e_fp_preactivate signature; the context argument is ignored.
 */
#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
int fn##_ctx(struct mlx5e_priv *priv, void *context) \
{ \
	return fn(priv); \
}
484c1ada 984int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
877662e2
TT
985int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
986 struct mlx5e_channels *new_chs,
b9ab5d0e
MM
987 mlx5e_fp_preactivate preactivate,
988 void *context);
fe867cac 989int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
b9ab5d0e 990int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
603f4a45
SM
991void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
992void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
55c2503d 993
d4b6c488 994void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
85082dba 995 int num_channels);
ebeaf084
TG
996
997void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
998void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
999void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
1000void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
1001
2ccb0a79 1002void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
696a97cf 1003void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
2a0f561b 1004 struct mlx5e_params *params);
be5323c8
AL
1005int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
1006void mlx5e_activate_rq(struct mlx5e_rq *rq);
1007void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
be5323c8
AL
1008void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
1009void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
9908aa29 1010
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p);
/* TXQ SQ lifecycle and recovery helpers. */
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			struct mlx5e_sq_param *param,
			struct mlx5e_create_sq_param *csp,
			u32 *sqn);
/* TX error-CQE recovery; scheduled as work (takes the work_struct). */
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
e3cfc7e6
MS
1026static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
1027{
1028 return MLX5_CAP_ETH(mdev, swp) &&
1029 MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
1030}
1031
extern const struct ethtool_ops mlx5e_ethtool_ops;

/* TIR create/destroy against the core device. */
int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
		     u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
/* Shared per-mdev resources used by all netdevs on this device. */
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
/* Refresh TIRs, toggling unicast/multicast loopback. */
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
		       bool enable_mc_lb);
/* 'mkc' is a memory-key context blob (opaque void * here). */
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
/* Drop RQ: sink for traffic while real RQs are unavailable - presumably;
 * confirm against callers.
 */
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);

/* Direct (one RQ per TIR) steering objects. */
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

/* TIS create/destroy; 'in' is the raw FW command input, 'tisn' the number. */
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);

/* MTU programming; _ctx variant matches mlx5e_fp_preactivate. */
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
/* ethtool helpers: backends shared by the NIC and representor ethtool_ops. */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
				     struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
				     const struct ethtool_link_ksettings *link_ksettings);
/* RSS key/indirection table access (ethtool -x / -X). */
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
				  struct ethtool_pauseparam *pauseparam);
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
				 struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
/* Default parameter construction for the NIC profile. */
void mlx5e_build_nic_params(struct mlx5e_priv *priv,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels);
/* DIM (dynamic interrupt moderation) work handlers. */
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features);
/* SR-IOV VF ndo backends; only built with eswitch support. */
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
1afff42c 1158#endif /* __MLX5_EN_H__ */