/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"

extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

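/* Convert between the device (port) MTU, which includes the L2 overhead
 * accounted for in params->hard_mtu, and the stack-visible (software) MTU.
 */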
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))

#define MLX5E_MAX_PRIORITY 8
#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

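/* Striding RQ (multi-packet WQE) stride geometry: the minimum log stride size
 * is dictated by the device cache line; the default is 2^6 bytes, or 2^8 when
 * CQE compression is enabled.
 */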
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
	MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))

#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
				   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)

#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
	(ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */

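/* Size of a UMR WQE that carries the per-MPWQE MTT list inline, and the number
 * of WQE building blocks (WQEBBs) it occupies in the internal control SQ.
 */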
#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS

#define MLX5E_NUM_MAIN_GROUPS 9

#define MLX5E_MSG_LEVEL NETIF_MSG_LINK

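/* Print a driver debug message when the corresponding NETIF_MSG_* bit is set
 * in priv->msglevel.
 */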
#define mlx5e_dbg(mlevel, priv, format, ...)			\
do {								\
	if (NETIF_MSG_##mlevel & (priv)->msglevel)		\
		netdev_warn(priv->netdev, format,		\
			    ##__VA_ARGS__);			\
} while (0)

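/* Default minimum fill level (in WQEs) for an RQ of the given type, capped at
 * half of the WQ size.
 */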
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
			     wq_size / 2);
	default:
		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
			     wq_size / 2);
	}
}

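/* The number of channels is bounded by the number of completion EQ vectors;
 * a kdump kernel is limited to the minimum.
 */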
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mdev->priv.eq_table.num_comp_vectors,
		      MLX5E_MAX_NUM_CHANNELS);
}

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg eth;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_cyc {
	struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_umr_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
	struct mlx5_mkey_seg mkc;
	struct mlx5_mtt inline_mtts[0];
};

extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
};

#define MLX5E_SET_PFLAG(params, pflag, enable)			\
	do {							\
		if (enable)					\
			(params)->pflags |= (pflag);		\
		else						\
			(params)->pflags &= ~(pflag);		\
	} while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))

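/* Typical usage of the pflag helpers (illustrative only):
 *	MLX5E_SET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *		...;
 */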
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

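/* Run-time data path parameters (ring sizes, CQ moderation, offload toggles);
 * the active copy lives in priv->channels.params.
 */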
struct mlx5e_params {
	u8 log_sq_size;
	u8 rq_wq_type;
	u8 log_rq_mtu_frames;
	u16 num_channels;
	u8 num_tc;
	bool rx_cqe_compress_def;
	struct net_dim_cq_moder rx_cq_moderation;
	struct net_dim_cq_moder tx_cq_moderation;
	bool lro_en;
	u32 lro_wqe_sz;
	u8 tx_min_inline_mode;
	u8 rss_hfunc;
	u8 toeplitz_hash_key[40];
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
	bool vlan_strip_disable;
	bool scatter_fcs_en;
	bool rx_dim_enabled;
	bool tx_dim_enabled;
	u32 lro_timeout;
	u32 pflags;
	struct bpf_prog *xdp_prog;
	unsigned int sw_mtu;
	int hard_mtu;
};

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
	/* bw pct for priority group */
	u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
	u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
	bool pfc_setting[CEE_DCBX_MAX_PRIO];
	bool pfc_enable;
};

enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

struct mlx5e_dcbx {
	enum mlx5_dcbx_oper_mode mode;
	struct mlx5e_cee_config cee_cfg; /* pending configuration */
	u8 dscp_app_cnt;

	/* The only setting that cannot be read from FW */
	u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
	u8 cap;

	/* Buffer configuration */
	bool manual_buffer;
	u32 cable_len;
	u32 xoff;
};

struct mlx5e_dcbx_dp {
	u8 dscp2prio[MLX5E_MAX_DSCP];
	u8 trust_state;
};
#endif

enum {
	MLX5E_RQ_STATE_ENABLED,
	MLX5E_RQ_STATE_AM,
	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
};

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	u16 event_ctr;
	struct napi_struct *napi;
	struct mlx5_core_cq mcq;
	struct mlx5e_channel *channel;

	/* cqe decompression */
	struct mlx5_cqe64 title;
	struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
	u8 mini_arr_idx;
	u16 decmprs_left;
	u16 decmprs_wqe_counter;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_tx_wqe_info {
	struct sk_buff *skb;
	u32 num_bytes;
	u8 num_wqebbs;
	u8 num_dma;
};

enum mlx5e_dma_map_type {
	MLX5E_DMA_MAP_SINGLE,
	MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
	dma_addr_t addr;
	u32 size;
	enum mlx5e_dma_map_type type;
};

enum {
	MLX5E_SQ_STATE_ENABLED,
	MLX5E_SQ_STATE_RECOVERING,
	MLX5E_SQ_STATE_IPSEC,
	MLX5E_SQ_STATE_AM,
	MLX5E_SQ_STATE_TLS,
	MLX5E_SQ_STATE_REDIRECT,
};

struct mlx5e_sq_wqe_info {
	u8 opcode;
};

struct mlx5e_txqsq {
	/* data path */

	/* dirtied @completion */
	u16 cc;
	u32 dma_fifo_cc;
	struct net_dim dim; /* Adaptive Moderation */

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;
	u32 dma_fifo_pc;

	struct mlx5e_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	u32 dma_fifo_mask;
	struct mlx5e_sq_stats *stats;
	struct {
		struct mlx5e_sq_dma *dma_fifo;
		struct mlx5e_tx_wqe_info *wqe_info;
	} db;
	void __iomem *uar_map;
	struct netdev_queue *txq;
	u32 sqn;
	u8 min_inline_mode;
	struct device *pdev;
	__be32 mkey_be;
	unsigned long state;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
	int txq_ix;
	u32 rate_limit;
	struct mlx5e_txqsq_recover {
		struct work_struct recover_work;
		u64 last_recover;
	} recover;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
	struct page *page;
	dma_addr_t addr;
};

struct mlx5e_xdp_info {
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct mlx5e_dma_info di;
};

struct mlx5e_xdpsq {
	/* data path */

	/* dirtied @completion */
	u16 cc;
	bool redirect_flush;

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;
	bool doorbell;

	struct mlx5e_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	struct mlx5e_xdpsq_stats *stats;
	struct {
		struct mlx5e_xdp_info *xdpi;
	} db;
	void __iomem *uar_map;
	u32 sqn;
	struct device *pdev;
	__be32 mkey_be;
	u8 min_inline_mode;
	unsigned long state;
	unsigned int hw_mtu;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;

struct mlx5e_icosq {
	/* data path */

	/* dirtied @xmit */
	u16 pc ____cacheline_aligned_in_smp;

	struct mlx5e_cq cq;

	/* write@xmit, read@completion */
	struct {
		struct mlx5e_sq_wqe_info *ico_wqe;
	} db;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;
	unsigned long state;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;

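/* True when the cyclic WQ has at least @n WQE building blocks free between the
 * producer counter (pc) and the completion counter (cc).
 */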
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

struct mlx5e_wqe_frag_info {
	struct mlx5e_dma_info *di;
	u32 offset;
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};

#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit can serve one napi call (for non-striding rq)
 * or one MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
			  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
	u32 head;
	u32 tail;
	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
			       u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);

enum mlx5e_rq_flag {
	MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};

struct mlx5e_rq_frag_info {
	int frag_size;
	int frag_stride;
};

struct mlx5e_rq_frags_info {
	struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
	u8 num_frags;
	u8 log_num_frags;
	u8 wqe_bulk;
};

struct mlx5e_rq {
	/* data path */
	union {
		struct {
			struct mlx5_wq_cyc wq;
			struct mlx5e_wqe_frag_info *frags;
			struct mlx5e_dma_info *di;
			struct mlx5e_rq_frags_info info;
			mlx5e_fp_skb_from_cqe skb_from_cqe;
		} wqe;
		struct {
			struct mlx5_wq_ll wq;
			struct mlx5e_umr_wqe umr_wqe;
			struct mlx5e_mpw_info *info;
			mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
			u16 num_strides;
			u8 log_stride_sz;
			bool umr_in_progress;
		} mpwqe;
	};
	struct {
		u16 headroom;
		u8 map_dir; /* dma map direction */
	} buff;

	struct mlx5e_channel *channel;
	struct device *pdev;
	struct net_device *netdev;
	struct mlx5e_rq_stats *stats;
	struct mlx5e_cq cq;
	struct mlx5e_page_cache page_cache;
	struct hwtstamp_config *tstamp;
	struct mlx5_clock *clock;

	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
	mlx5e_fp_post_rx_wqes post_wqes;
	mlx5e_fp_dealloc_wqe dealloc_wqe;

	unsigned long state;
	int ix;

	struct net_dim dim; /* Dynamic Interrupt Moderation */

	/* XDP */
	struct bpf_prog *xdp_prog;
	struct mlx5e_xdpsq xdpsq;
	DECLARE_BITMAP(flags, 8);
	struct page_pool *page_pool;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	__be32 mkey_be;
	u8 wq_type;
	u32 rqn;
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey umr_mkey;

	/* XDP read-mostly */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
	struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_icosq icosq; /* internal control operations */
	bool xdp;
	struct napi_struct napi;
	struct device *pdev;
	struct net_device *netdev;
	__be32 mkey_be;
	u8 num_tc;

	/* XDP_REDIRECT */
	struct mlx5e_xdpsq xdpsq;

	/* data path - accessed per napi poll */
	struct irq_desc *irq_desc;
	struct mlx5e_ch_stats *stats;

	/* control */
	struct mlx5e_priv *priv;
	struct mlx5_core_dev *mdev;
	struct hwtstamp_config *tstamp;
	int ix;
	int cpu;
};

struct mlx5e_channels {
	struct mlx5e_channel **c;
	unsigned int num;
	struct mlx5e_params params;
};

struct mlx5e_channel_stats {
	struct mlx5e_ch_stats ch;
	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
	struct mlx5e_rq_stats rq;
	struct mlx5e_xdpsq_stats rq_xdpsq;
	struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;

enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
	MLX5E_STATE_OPENED,
	MLX5E_STATE_DESTROYING,
};

struct mlx5e_rqt {
	u32 rqtn;
	bool enabled;
};

struct mlx5e_tir {
	u32 tirn;
	struct mlx5e_rqt rqt;
	struct list_head list;
};

enum {
	MLX5E_TC_PRIO = 0,
	MLX5E_NIC_PRIO
};

struct mlx5e_priv {
	/* priv data path fields - start */
	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx_dp dcbx_dp;
#endif
	/* priv data path fields - end */

	u32 msglevel;
	unsigned long state;
	struct mutex state_lock; /* Protects Interface state */
	struct mlx5e_rq drop_rq;

	struct mlx5e_channels channels;
	u32 tisn[MLX5E_MAX_NUM_TC];
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
	u32 tx_rates[MLX5E_MAX_NUM_SQS];

	struct mlx5e_flow_steering fs;

	struct workqueue_struct *wq;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	struct work_struct tx_timeout_work;
	struct delayed_work update_stats_work;

	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_stats stats;
	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
	u8 max_opened_tc;
	struct hwtstamp_config tstamp;
	u16 q_counter;
	u16 drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	struct mlx5e_dcbx dcbx;
#endif

	const struct mlx5e_profile *profile;
	void *ppriv;
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_tls *tls;
#endif
};

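/* Callbacks implemented by each netdev flavour and invoked by the generic
 * mlx5e netdev management code declared further below.
 */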
struct mlx5e_profile {
	int (*init)(struct mlx5_core_dev *mdev,
		    struct net_device *netdev,
		    const struct mlx5e_profile *profile, void *ppriv);
	void (*cleanup)(struct mlx5e_priv *priv);
	int (*init_rx)(struct mlx5e_priv *priv);
	void (*cleanup_rx)(struct mlx5e_priv *priv);
	int (*init_tx)(struct mlx5e_priv *priv);
	void (*cleanup_tx)(struct mlx5e_priv *priv);
	void (*enable)(struct mlx5e_priv *priv);
	void (*disable)(struct mlx5e_priv *priv);
	void (*update_stats)(struct mlx5e_priv *priv);
	void (*update_carrier)(struct mlx5e_priv *priv);
	int (*max_nch)(struct mlx5_core_dev *mdev);
	struct {
		mlx5e_fp_handle_rx_cqe handle_rx_cqe;
		mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
	} rx_handlers;
	int max_tc;
};

void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev,
		       select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params);

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			  struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);

void mlx5e_update_stats(struct mlx5e_priv *priv);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
		     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
	bool is_rss;
	union {
		u32 rqn; /* Direct RQN (Non-RSS) */
		struct {
			u8 hfunc;
			struct mlx5e_channels *channels;
		} rss; /* RSS data */
	};
};

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);

/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
				 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params);

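/* Matching on inner headers of tunneled traffic requires stateless GRE offload
 * and support for the inner IP version field in the NIC RX flow table.
 */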
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}

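/* Fetch the WQE at the SQ's current producer index, zero it, and return both
 * the WQE pointer and its index via @wqe and @pi.
 */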
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
				      struct mlx5e_tx_wqe **wqe,
				      u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(*wqe, 0, sizeof(**wqe));
}

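/* Post a NOP control segment at the current producer position and advance the
 * producer counter; used, for example, to pad the ring so a real WQE does not
 * wrap around its end.
 */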
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

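/* Ring the SQ doorbell: publish the latest producer counter to the doorbell
 * record and write the control segment to the UAR so the device starts
 * processing the posted WQEs.
 */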
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
		     void __iomem *uar_map,
		     struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}

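/* Request a completion event from the device for the next CQE on this CQ. */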
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif

int mlx5e_create_tir(struct mlx5_core_dev *mdev,
		     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
		       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

int mlx5e_bits_invert(unsigned long a, int size);

typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
			       uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
				 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
				struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
			       struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
			       struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
			      struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
			       struct ethtool_flash *flash);

/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev, struct mlx5e_priv *priv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
		    void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_params *params);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */