/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "accel/ipsec.h"
#include "vxlan.h"

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};

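/* Striding RQ (MPWQE) is only usable when the device reports striding RQ
 * support together with the UMR capabilities used to map its RX buffers
 * (umr_ptr_rlky, reg_umr_sq).
 */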
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}

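/* Fill the RQ sizing parameters for the chosen WQ type: striding RQ
 * (MPWQE) derives its stride size and stride count, while the plain
 * linked-list RQ reserves packet headroom (XDP_PACKET_HEADROOM when an
 * XDP program is attached) and shrinks the LRO WQE size by the room
 * that build_skb() needs.
 */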
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params, u8 rq_type)
{
	params->rq_wq_type = rq_type;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			params->mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
		params->rq_headroom = params->xdp_prog ?
			XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
		params->rq_headroom += NET_IP_ALIGN;

		/* Extra room needed for build_skb */
		params->lro_wqe_sz -= params->rq_headroom +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(params->log_rq_size),
		       BIT(params->mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;
	mlx5e_init_rq_type_params(mdev, params, rq_type);
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

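/* Fold the per-ring RX and TX counters of every channel into the
 * software stats. Totals are accumulated in a stack temporary and
 * copied into priv->stats.sw in one step at the end.
 */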
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats temp, *s = &temp;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse  += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
		}
	}

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
	memcpy(&priv->stats.sw, s, sizeof(*s));
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!full)
		return;

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
		out = pstats->eth_ext_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
	int err;

	if (!priv->q_counter)
		return;

	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
	if (err)
		return;

	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

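/* Refresh the stats groups. The PCIe, IPSec and most pport counter
 * groups are queried only when @full is set; the periodic ndo stats
 * path (mlx5e_update_ndo_stats below) passes full = false.
 */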
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
	if (full) {
		mlx5e_update_pcie_counters(priv);
		mlx5e_ipsec_update_stats(priv);
	}
	mlx5e_update_pport_counters(priv, full);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_q_counter(priv);
	mlx5e_update_sw_counters(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_stats(priv, false);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}

static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

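/* Pre-build the UMR WQE used to (re)map the MTTs of one striding RX
 * WQE. It is filled once per WQE index here and later posted on the
 * internal (ICO) SQ by the RX datapath.
 */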
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe,
				       u16 ix)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}

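/* Create the UMR memory key that covers the RQ's MTT translations.
 * The mkey is created in the "free" state with UMR enabled, so the
 * driver can update its translations at runtime via UMR WQEs.
 */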
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

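/* Allocate the software state of one RQ: create the linked-list WQ,
 * take a reference on the XDP program (if any), then set up the
 * type-specific state: stride sizes, UMR mkey and MPWQE info for
 * striding RQ, or frag info and page-reuse mode for the linked-list
 * RQ. The hardware RQ object is created separately by mlx5e_create_rq().
 */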
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int npages;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = params->rq_headroom;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);

		byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->wqe.frag_info =
			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
				     GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frag_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			kfree(rq->wqe.frag_info);
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		byte_count = params->lro_en  ?
				params->lro_wqe_sz :
				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev))
			byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;

		/* calc the required page order */
		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;

			wqe->data.addr = cpu_to_be64(dma_offset);
		}

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = params->rx_cq_moderation.cq_period_mode;
	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->wqe.frag_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

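/* Transition the hardware RQ between RQC states (e.g. RST -> RDY)
 * with the MODIFY_RQ command.
 */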
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

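/* Wait (up to 20s, sleeping in 20ms steps) until the RQ has been
 * refilled with at least the minimal number of RX WQEs.
 */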
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;

	struct mlx5_wq_ll *wq = &rq->wq;
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= min_wqes)
			return 0;

		msleep(20);
	}

	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->rqn, wq->cur_sz, min_wqes);
	return -ETIMEDOUT;
}

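/* Reclaim every RX descriptor still owned by the driver: a UMR WQE
 * that may be in progress (always at wq->head on striding RQ), all
 * posted WQEs, and, in page-reuse mode, pages still held by handled
 * but not yet re-posted WQEs.
 */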
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    rq->mpwqe.umr_in_progress)
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
		 * but yet to be re-posted.
		 */
		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
			rq->dealloc_wqe(rq, wqe_ix);
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_am_enabled)
		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);

	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->am.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

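/* Allocate the software state of one TXQ SQ: the cyclic WQ, the DMA
 * fifo and per-WQE info arrays (NUMA-local to the channel's CPU),
 * inline-mode settings, and the "edge" offset past which a maximal
 * WQE no longer fits without wrapping.
 */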
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->max_inline      = params->tx_max_inline;
	sq->min_inline_mode = params->tx_min_inline_mode;
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

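/* Parameters of the CREATE_SQ firmware command, shared by the TXQ,
 * ICO and XDP SQ open paths below, which differ only in how they
 * fill it in.
 */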
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl	*wq_ctrl;
	u32			cqn;
	u32			tisn;
	u8			tis_lst_sz;
	u8			min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

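/* Open one TXQ SQ: allocate the software queue, create the hardware
 * SQ and bring it to RDY, then re-apply any tx rate limit that was
 * configured for this txq before it was (re)created.
 */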
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

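/* Quiesce a TXQ SQ: mark it disabled, let in-flight NAPI finish so it
 * cannot re-wake the queue, stop the netdev TX queue, and post one
 * final NOP so a last doorbell flushes any pending WQEs.
 */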
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

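/* Open the per-channel XDP_TX SQ. XDP_TX WQEs all share the same
 * layout, so the fixed ctrl/eth/data segment fields of every WQE slot
 * are pre-initialized here once, leaving only per-packet fields for
 * the datapath to fill.
 */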
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

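/* Common CQ setup: create the CQ work queue, wire up the doorbell
 * records and completion/error handlers, and initialize the op_own
 * byte of every CQE so no entry reads as a valid completion before
 * hardware writes it.
 */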
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

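/* Apply a per-SQ tx rate limit: drop the SQ's previous entry in the
 * device rate table, add the new rate (0 means unlimited), and point
 * the SQ at the new packet-pacing index via MODIFY_SQ while keeping
 * the SQ in RDY state.
 */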
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

1731static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1732{
1733 struct mlx5e_priv *priv = netdev_priv(dev);
1734 struct mlx5_core_dev *mdev = priv->mdev;
acc6c595 1735 struct mlx5e_txqsq *sq = priv->txq2sq[index];
507f0c81
YP
1736 int err = 0;
1737
1738 if (!mlx5_rl_is_supported(mdev)) {
1739 netdev_err(dev, "Rate limiting is not supported on this device\n");
1740 return -EINVAL;
1741 }
1742
1743 /* the rate is given in Mb/sec; the HW config is in Kb/sec */
1744 rate = rate << 10;
1745
1746 /* Check whether the rate is in a valid range; 0 is always valid */
1747 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1748 netdev_err(dev, "TX rate %u is not in range\n", rate);
1749 return -ERANGE;
1750 }
1751
1752 mutex_lock(&priv->state_lock);
1753 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1754 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1755 if (!err)
1756 priv->tx_rates[index] = rate;
1757 mutex_unlock(&priv->state_lock);
1758
1759 return err;
1760}
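/* Worked example for the unit conversion above (illustrative): the stack
 * hands mlx5e_set_tx_maxrate() the rate in Mb/s, and `rate << 10` converts
 * it to Kb/s for the HW rate table using a binary factor, so a requested
 * 100 Mb/s is programmed as 100 << 10 = 102400 Kb/s (i.e. 1 Mb counts as
 * 1024 Kb here, not 1000).
 */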
1761
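/* mlx5e_open_channel() below builds one channel in a fixed order: all CQs
 * first (ICOSQ CQ, per-TC TX CQs, RX CQ, and the XDP SQ CQ when XDP is on),
 * then NAPI is enabled, then the ICOSQ, the per-TC SQs, the XDP SQ and
 * finally the RQ. The error labels unwind in exactly the reverse order.
 */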
f62b8bb8 1762static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
6a9764ef 1763 struct mlx5e_params *params,
f62b8bb8
AV
1764 struct mlx5e_channel_param *cparam,
1765 struct mlx5e_channel **cp)
1766{
6a9764ef 1767 struct mlx5e_cq_moder icocq_moder = {0, 0};
f62b8bb8 1768 struct net_device *netdev = priv->netdev;
231243c8 1769 int cpu = mlx5e_get_cpu(priv, ix);
f62b8bb8 1770 struct mlx5e_channel *c;
a8c2eb15 1771 unsigned int irq;
f62b8bb8 1772 int err;
a8c2eb15 1773 int eqn;
f62b8bb8 1774
4d7d3ed9
YA
1775 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1776 if (err)
1777 return err;
1778
231243c8 1779 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
f62b8bb8
AV
1780 if (!c)
1781 return -ENOMEM;
1782
1783 c->priv = priv;
a43b25da
SM
1784 c->mdev = priv->mdev;
1785 c->tstamp = &priv->tstamp;
f62b8bb8 1786 c->ix = ix;
231243c8 1787 c->cpu = cpu;
f62b8bb8
AV
1788 c->pdev = &priv->mdev->pdev->dev;
1789 c->netdev = priv->netdev;
b50d292b 1790 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
6a9764ef
SM
1791 c->num_tc = params->num_tc;
1792 c->xdp = !!params->xdp_prog;
cb3c7fd4 1793
a8c2eb15
TT
1794 c->irq_desc = irq_to_desc(irq);
1795
f62b8bb8
AV
1796 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1797
6a9764ef 1798 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
f62b8bb8
AV
1799 if (err)
1800 goto err_napi_del;
1801
6a9764ef 1802 err = mlx5e_open_tx_cqs(c, params, cparam);
d3c9bc27
TT
1803 if (err)
1804 goto err_close_icosq_cq;
1805
6a9764ef 1806 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
f62b8bb8
AV
1807 if (err)
1808 goto err_close_tx_cqs;
f62b8bb8 1809
d7a0ecab 1810 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
6a9764ef
SM
1811 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1812 &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
d7a0ecab
SM
1813 if (err)
1814 goto err_close_rx_cq;
1815
f62b8bb8
AV
1816 napi_enable(&c->napi);
1817
6a9764ef 1818 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
f62b8bb8
AV
1819 if (err)
1820 goto err_disable_napi;
1821
6a9764ef 1822 err = mlx5e_open_sqs(c, params, cparam);
d3c9bc27
TT
1823 if (err)
1824 goto err_close_icosq;
1825
6a9764ef 1826 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
d7a0ecab
SM
1827 if (err)
1828 goto err_close_sqs;
b5503b99 1829
6a9764ef 1830 err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
f62b8bb8 1831 if (err)
b5503b99 1832 goto err_close_xdp_sq;
f62b8bb8 1833
f62b8bb8
AV
1834 *cp = c;
1835
1836 return 0;
b5503b99 1837err_close_xdp_sq:
d7a0ecab 1838 if (c->xdp)
31391048 1839 mlx5e_close_xdpsq(&c->rq.xdpsq);
f62b8bb8
AV
1840
1841err_close_sqs:
1842 mlx5e_close_sqs(c);
1843
d3c9bc27 1844err_close_icosq:
31391048 1845 mlx5e_close_icosq(&c->icosq);
d3c9bc27 1846
f62b8bb8
AV
1847err_disable_napi:
1848 napi_disable(&c->napi);
d7a0ecab 1849 if (c->xdp)
31871f87 1850 mlx5e_close_cq(&c->rq.xdpsq.cq);
d7a0ecab
SM
1851
1852err_close_rx_cq:
f62b8bb8
AV
1853 mlx5e_close_cq(&c->rq.cq);
1854
1855err_close_tx_cqs:
1856 mlx5e_close_tx_cqs(c);
1857
d3c9bc27
TT
1858err_close_icosq_cq:
1859 mlx5e_close_cq(&c->icosq.cq);
1860
f62b8bb8
AV
1861err_napi_del:
1862 netif_napi_del(&c->napi);
1863 kfree(c);
1864
1865 return err;
1866}
1867
acc6c595
SM
1868static void mlx5e_activate_channel(struct mlx5e_channel *c)
1869{
1870 int tc;
1871
1872 for (tc = 0; tc < c->num_tc; tc++)
1873 mlx5e_activate_txqsq(&c->sq[tc]);
1874 mlx5e_activate_rq(&c->rq);
231243c8 1875 netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
acc6c595
SM
1876}
1877
1878static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1879{
1880 int tc;
1881
1882 mlx5e_deactivate_rq(&c->rq);
1883 for (tc = 0; tc < c->num_tc; tc++)
1884 mlx5e_deactivate_txqsq(&c->sq[tc]);
1885}
1886
f62b8bb8
AV
1887static void mlx5e_close_channel(struct mlx5e_channel *c)
1888{
1889 mlx5e_close_rq(&c->rq);
b5503b99 1890 if (c->xdp)
31391048 1891 mlx5e_close_xdpsq(&c->rq.xdpsq);
f62b8bb8 1892 mlx5e_close_sqs(c);
31391048 1893 mlx5e_close_icosq(&c->icosq);
f62b8bb8 1894 napi_disable(&c->napi);
b5503b99 1895 if (c->xdp)
31871f87 1896 mlx5e_close_cq(&c->rq.xdpsq.cq);
f62b8bb8
AV
1897 mlx5e_close_cq(&c->rq.cq);
1898 mlx5e_close_tx_cqs(c);
d3c9bc27 1899 mlx5e_close_cq(&c->icosq.cq);
f62b8bb8 1900 netif_napi_del(&c->napi);
7ae92ae5 1901
f62b8bb8
AV
1902 kfree(c);
1903}
1904
1905static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
6a9764ef 1906 struct mlx5e_params *params,
f62b8bb8
AV
1907 struct mlx5e_rq_param *param)
1908{
1909 void *rqc = param->rqc;
1910 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1911
6a9764ef 1912 switch (params->rq_wq_type) {
461017cb 1913 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
6a9764ef
SM
1914 MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
1915 MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
461017cb
TT
1916 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1917 break;
1918 default: /* MLX5_WQ_TYPE_LINKED_LIST */
1919 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1920 }
1921
f62b8bb8
AV
1922 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1923 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
6a9764ef 1924 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
b50d292b 1925 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
593cf338 1926 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
6a9764ef 1927 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
102722fc 1928 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
f62b8bb8 1929
311c7c71 1930 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
f62b8bb8
AV
1931 param->wq.linear = 1;
1932}
1933
0ff59f96
GP
1934static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
1935 struct mlx5e_rq_param *param)
556dd1b9
TT
1936{
1937 void *rqc = param->rqc;
1938 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1939
1940 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1941 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
0ff59f96
GP
1942
1943 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
556dd1b9
TT
1944}
1945
d3c9bc27
TT
1946static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1947 struct mlx5e_sq_param *param)
f62b8bb8
AV
1948{
1949 void *sqc = param->sqc;
1950 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1951
f62b8bb8 1952 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
b50d292b 1953 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
f62b8bb8 1954
311c7c71 1955 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
d3c9bc27
TT
1956}
1957
1958static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
6a9764ef 1959 struct mlx5e_params *params,
d3c9bc27
TT
1960 struct mlx5e_sq_param *param)
1961{
1962 void *sqc = param->sqc;
1963 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1964
1965 mlx5e_build_sq_param_common(priv, param);
6a9764ef 1966 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2ac9cfe7 1967 MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
f62b8bb8
AV
1968}
1969
1970static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1971 struct mlx5e_cq_param *param)
1972{
1973 void *cqc = param->cqc;
1974
30aa60b3 1975 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
f62b8bb8
AV
1976}
1977
1978static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
6a9764ef 1979 struct mlx5e_params *params,
f62b8bb8
AV
1980 struct mlx5e_cq_param *param)
1981{
1982 void *cqc = param->cqc;
461017cb 1983 u8 log_cq_size;
f62b8bb8 1984
6a9764ef 1985 switch (params->rq_wq_type) {
461017cb 1986 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
6a9764ef 1987 log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
461017cb
TT
1988 break;
1989 default: /* MLX5_WQ_TYPE_LINKED_LIST */
6a9764ef 1990 log_cq_size = params->log_rq_size;
461017cb
TT
1991 }
1992
1993 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
6a9764ef 1994 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
7219ab34
TT
1995 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1996 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
1997 }
f62b8bb8
AV
1998
1999 mlx5e_build_common_cq_param(priv, param);
0088cbbc 2000 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
f62b8bb8
AV
2001}
2002
2003static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
6a9764ef 2004 struct mlx5e_params *params,
f62b8bb8
AV
2005 struct mlx5e_cq_param *param)
2006{
2007 void *cqc = param->cqc;
2008
6a9764ef 2009 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
f62b8bb8
AV
2010
2011 mlx5e_build_common_cq_param(priv, param);
0088cbbc 2012 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
f62b8bb8
AV
2013}
2014
d3c9bc27 2015static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
6a9764ef
SM
2016 u8 log_wq_size,
2017 struct mlx5e_cq_param *param)
d3c9bc27
TT
2018{
2019 void *cqc = param->cqc;
2020
2021 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2022
2023 mlx5e_build_common_cq_param(priv, param);
9908aa29
TT
2024
2025 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
d3c9bc27
TT
2026}
2027
2028static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
6a9764ef
SM
2029 u8 log_wq_size,
2030 struct mlx5e_sq_param *param)
d3c9bc27
TT
2031{
2032 void *sqc = param->sqc;
2033 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2034
2035 mlx5e_build_sq_param_common(priv, param);
2036
2037 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
bc77b240 2038 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
d3c9bc27
TT
2039}
2040
b5503b99 2041static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
6a9764ef 2042 struct mlx5e_params *params,
b5503b99
SM
2043 struct mlx5e_sq_param *param)
2044{
2045 void *sqc = param->sqc;
2046 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2047
2048 mlx5e_build_sq_param_common(priv, param);
6a9764ef 2049 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
b5503b99
SM
2050}
2051
6a9764ef
SM
2052static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2053 struct mlx5e_params *params,
2054 struct mlx5e_channel_param *cparam)
f62b8bb8 2055{
bc77b240 2056 u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
d3c9bc27 2057
6a9764ef
SM
2058 mlx5e_build_rq_param(priv, params, &cparam->rq);
2059 mlx5e_build_sq_param(priv, params, &cparam->sq);
2060 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2061 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2062 mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2063 mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2064 mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
f62b8bb8
AV
2065}
2066
55c2503d
SM
2067int mlx5e_open_channels(struct mlx5e_priv *priv,
2068 struct mlx5e_channels *chs)
f62b8bb8 2069{
6b87663f 2070 struct mlx5e_channel_param *cparam;
03289b88 2071 int err = -ENOMEM;
f62b8bb8 2072 int i;
f62b8bb8 2073
6a9764ef 2074 chs->num = chs->params.num_channels;
03289b88 2075
ff9c852f 2076 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
6b87663f 2077 cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
acc6c595
SM
2078 if (!chs->c || !cparam)
2079 goto err_free;
f62b8bb8 2080
6a9764ef 2081 mlx5e_build_channel_param(priv, &chs->params, cparam);
ff9c852f 2082 for (i = 0; i < chs->num; i++) {
6a9764ef 2083 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
f62b8bb8
AV
2084 if (err)
2085 goto err_close_channels;
2086 }
2087
6b87663f 2088 kfree(cparam);
f62b8bb8
AV
2089 return 0;
2090
2091err_close_channels:
2092 for (i--; i >= 0; i--)
ff9c852f 2093 mlx5e_close_channel(chs->c[i]);
f62b8bb8 2094
acc6c595 2095err_free:
ff9c852f 2096 kfree(chs->c);
6b87663f 2097 kfree(cparam);
ff9c852f 2098 chs->num = 0;
f62b8bb8
AV
2099 return err;
2100}
2101
acc6c595 2102static void mlx5e_activate_channels(struct mlx5e_channels *chs)
f62b8bb8
AV
2103{
2104 int i;
2105
acc6c595
SM
2106 for (i = 0; i < chs->num; i++)
2107 mlx5e_activate_channel(chs->c[i]);
2108}
2109
2110static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2111{
2112 int err = 0;
2113 int i;
2114
2115 for (i = 0; i < chs->num; i++) {
2116 err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
2117 if (err)
2118 break;
2119 }
2120
2121 return err;
2122}
2123
2124static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2125{
2126 int i;
2127
2128 for (i = 0; i < chs->num; i++)
2129 mlx5e_deactivate_channel(chs->c[i]);
2130}
2131
55c2503d 2132void mlx5e_close_channels(struct mlx5e_channels *chs)
acc6c595
SM
2133{
2134 int i;
c3b7c5c9 2135
ff9c852f
SM
2136 for (i = 0; i < chs->num; i++)
2137 mlx5e_close_channel(chs->c[i]);
f62b8bb8 2138
ff9c852f
SM
2139 kfree(chs->c);
2140 chs->num = 0;
f62b8bb8
AV
2141}
2142
a5f97fee
SM
2143static int
2144mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
f62b8bb8
AV
2145{
2146 struct mlx5_core_dev *mdev = priv->mdev;
f62b8bb8
AV
2147 void *rqtc;
2148 int inlen;
2149 int err;
1da36696 2150 u32 *in;
a5f97fee 2151 int i;
f62b8bb8 2152
f62b8bb8 2153 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1b9a07ee 2154 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
2155 if (!in)
2156 return -ENOMEM;
2157
2158 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2159
2160 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2161 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2162
a5f97fee
SM
2163 for (i = 0; i < sz; i++)
2164 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2be6967c 2165
398f3351
HHZ
2166 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2167 if (!err)
2168 rqt->enabled = true;
f62b8bb8
AV
2169
2170 kvfree(in);
1da36696
TT
2171 return err;
2172}
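/* A freshly created RQT has every entry pointing at the drop RQ (see the
 * rq_num[] fill above); real RQ numbers are only plugged in later via
 * mlx5e_redirect_rqt() once the channels are up.
 */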
2173
cb67b832 2174void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
1da36696 2175{
398f3351
HHZ
2176 rqt->enabled = false;
2177 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
1da36696
TT
2178}
2179
8f493ffd 2180int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
6bfd390b
HHZ
2181{
2182 struct mlx5e_rqt *rqt = &priv->indir_rqt;
8f493ffd 2183 int err;
6bfd390b 2184
8f493ffd
SM
2185 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2186 if (err)
2187 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2188 return err;
6bfd390b
HHZ
2189}
2190
cb67b832 2191int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
1da36696 2192{
398f3351 2193 struct mlx5e_rqt *rqt;
1da36696
TT
2194 int err;
2195 int ix;
2196
6bfd390b 2197 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
398f3351 2198 rqt = &priv->direct_tir[ix].rqt;
a5f97fee 2199 err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
1da36696
TT
2200 if (err)
2201 goto err_destroy_rqts;
2202 }
2203
2204 return 0;
2205
2206err_destroy_rqts:
8f493ffd 2207 mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
1da36696 2208 for (ix--; ix >= 0; ix--)
398f3351 2209 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
1da36696 2210
f62b8bb8
AV
2211 return err;
2212}
2213
8f493ffd
SM
2214void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2215{
2216 int i;
2217
2218 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2219 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2220}
2221
a5f97fee
SM
2222static int mlx5e_rx_hash_fn(int hfunc)
2223{
2224 return (hfunc == ETH_RSS_HASH_TOP) ?
2225 MLX5_RX_HASH_FN_TOEPLITZ :
2226 MLX5_RX_HASH_FN_INVERTED_XOR8;
2227}
2228
2229static int mlx5e_bits_invert(unsigned long a, int size)
2230{
2231 int inv = 0;
2232 int i;
2233
2234 for (i = 0; i < size; i++)
2235 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2236
2237 return inv;
2238}
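/* Worked example (illustrative): mlx5e_bits_invert() reverses the low
 * @size bits of @a, e.g. mlx5e_bits_invert(1, 3) == 4 (0b001 -> 0b100).
 * It is used below so that ETH_RSS_HASH_XOR walks the indirection table
 * in bit-reversed order.
 */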
2239
2240static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2241 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2242{
2243 int i;
2244
2245 for (i = 0; i < sz; i++) {
2246 u32 rqn;
2247
2248 if (rrp.is_rss) {
2249 int ix = i;
2250
2251 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2252 ix = mlx5e_bits_invert(i, ilog2(sz));
2253
6a9764ef 2254 ix = priv->channels.params.indirection_rqt[ix];
a5f97fee
SM
2255 rqn = rrp.rss.channels->c[ix]->rq.rqn;
2256 } else {
2257 rqn = rrp.rqn;
2258 }
2259 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2260 }
2261}
2262
2263int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2264 struct mlx5e_redirect_rqt_param rrp)
5c50368f
AS
2265{
2266 struct mlx5_core_dev *mdev = priv->mdev;
5c50368f
AS
2267 void *rqtc;
2268 int inlen;
1da36696 2269 u32 *in;
5c50368f
AS
2270 int err;
2271
5c50368f 2272 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1b9a07ee 2273 in = kvzalloc(inlen, GFP_KERNEL);
5c50368f
AS
2274 if (!in)
2275 return -ENOMEM;
2276
2277 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2278
2279 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5c50368f 2280 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
a5f97fee 2281 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
1da36696 2282 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
5c50368f
AS
2283
2284 kvfree(in);
5c50368f
AS
2285 return err;
2286}
2287
a5f97fee
SM
2288static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2289 struct mlx5e_redirect_rqt_param rrp)
2290{
2291 if (!rrp.is_rss)
2292 return rrp.rqn;
2293
2294 if (ix >= rrp.rss.channels->num)
2295 return priv->drop_rq.rqn;
2296
2297 return rrp.rss.channels->c[ix]->rq.rqn;
2298}
2299
2300static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2301 struct mlx5e_redirect_rqt_param rrp)
40ab6a6e 2302{
1da36696
TT
2303 u32 rqtn;
2304 int ix;
2305
398f3351 2306 if (priv->indir_rqt.enabled) {
a5f97fee 2307 /* RSS RQ table */
398f3351 2308 rqtn = priv->indir_rqt.rqtn;
a5f97fee 2309 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
398f3351
HHZ
2310 }
2311
a5f97fee
SM
2312 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2313 struct mlx5e_redirect_rqt_param direct_rrp = {
2314 .is_rss = false,
95632791
AM
2315 {
2316 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
2317 },
a5f97fee
SM
2318 };
2319
2320 /* Direct RQ Tables */
398f3351
HHZ
2321 if (!priv->direct_tir[ix].rqt.enabled)
2322 continue;
a5f97fee 2323
398f3351 2324 rqtn = priv->direct_tir[ix].rqt.rqtn;
a5f97fee 2325 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
1da36696 2326 }
40ab6a6e
AS
2327}
2328
a5f97fee
SM
2329static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2330 struct mlx5e_channels *chs)
2331{
2332 struct mlx5e_redirect_rqt_param rrp = {
2333 .is_rss = true,
95632791
AM
2334 {
2335 .rss = {
2336 .channels = chs,
2337 .hfunc = chs->params.rss_hfunc,
2338 }
2339 },
a5f97fee
SM
2340 };
2341
2342 mlx5e_redirect_rqts(priv, rrp);
2343}
2344
2345static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2346{
2347 struct mlx5e_redirect_rqt_param drop_rrp = {
2348 .is_rss = false,
95632791
AM
2349 {
2350 .rqn = priv->drop_rq.rqn,
2351 },
a5f97fee
SM
2352 };
2353
2354 mlx5e_redirect_rqts(priv, drop_rrp);
2355}
2356
6a9764ef 2357static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
5c50368f 2358{
6a9764ef 2359 if (!params->lro_en)
5c50368f
AS
2360 return;
2361
2362#define ROUGH_MAX_L2_L3_HDR_SZ 256
2363
2364 MLX5_SET(tirc, tirc, lro_enable_mask,
2365 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2366 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2367 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
6a9764ef
SM
2368 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2369 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
5c50368f
AS
2370}
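/* Worked example for the >> 8 above (illustrative): lro_max_ip_payload_size
 * is programmed in units of 256 bytes. Assuming the default 64KB LRO WQE
 * size, (65536 - 256) >> 8 = 255 units, i.e. an ~65280-byte cap after
 * reserving ROUGH_MAX_L2_L3_HDR_SZ for headers.
 */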
2371
6a9764ef
SM
2372void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2373 enum mlx5e_traffic_types tt,
7b3722fa 2374 void *tirc, bool inner)
bdfc028d 2375{
7b3722fa
GP
2376 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2377 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
a100ff3e
GP
2378
2379#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2380 MLX5_HASH_FIELD_SEL_DST_IP)
2381
2382#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2383 MLX5_HASH_FIELD_SEL_DST_IP |\
2384 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2385 MLX5_HASH_FIELD_SEL_L4_DPORT)
2386
2387#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2388 MLX5_HASH_FIELD_SEL_DST_IP |\
2389 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2390
6a9764ef
SM
2391 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2392 if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
bdfc028d
TT
2393 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2394 rx_hash_toeplitz_key);
2395 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2396 rx_hash_toeplitz_key);
2397
2398 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
6a9764ef 2399 memcpy(rss_key, params->toeplitz_hash_key, len);
bdfc028d 2400 }
a100ff3e
GP
2401
2402 switch (tt) {
2403 case MLX5E_TT_IPV4_TCP:
2404 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2405 MLX5_L3_PROT_TYPE_IPV4);
2406 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2407 MLX5_L4_PROT_TYPE_TCP);
2408 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2409 MLX5_HASH_IP_L4PORTS);
2410 break;
2411
2412 case MLX5E_TT_IPV6_TCP:
2413 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2414 MLX5_L3_PROT_TYPE_IPV6);
2415 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2416 MLX5_L4_PROT_TYPE_TCP);
2417 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2418 MLX5_HASH_IP_L4PORTS);
2419 break;
2420
2421 case MLX5E_TT_IPV4_UDP:
2422 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2423 MLX5_L3_PROT_TYPE_IPV4);
2424 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2425 MLX5_L4_PROT_TYPE_UDP);
2426 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2427 MLX5_HASH_IP_L4PORTS);
2428 break;
2429
2430 case MLX5E_TT_IPV6_UDP:
2431 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2432 MLX5_L3_PROT_TYPE_IPV6);
2433 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2434 MLX5_L4_PROT_TYPE_UDP);
2435 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2436 MLX5_HASH_IP_L4PORTS);
2437 break;
2438
2439 case MLX5E_TT_IPV4_IPSEC_AH:
2440 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2441 MLX5_L3_PROT_TYPE_IPV4);
2442 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2443 MLX5_HASH_IP_IPSEC_SPI);
2444 break;
2445
2446 case MLX5E_TT_IPV6_IPSEC_AH:
2447 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2448 MLX5_L3_PROT_TYPE_IPV6);
2449 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2450 MLX5_HASH_IP_IPSEC_SPI);
2451 break;
2452
2453 case MLX5E_TT_IPV4_IPSEC_ESP:
2454 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2455 MLX5_L3_PROT_TYPE_IPV4);
2456 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2457 MLX5_HASH_IP_IPSEC_SPI);
2458 break;
2459
2460 case MLX5E_TT_IPV6_IPSEC_ESP:
2461 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2462 MLX5_L3_PROT_TYPE_IPV6);
2463 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2464 MLX5_HASH_IP_IPSEC_SPI);
2465 break;
2466
2467 case MLX5E_TT_IPV4:
2468 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2469 MLX5_L3_PROT_TYPE_IPV4);
2470 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2471 MLX5_HASH_IP);
2472 break;
2473
2474 case MLX5E_TT_IPV6:
2475 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2476 MLX5_L3_PROT_TYPE_IPV6);
2477 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2478 MLX5_HASH_IP);
2479 break;
2480 default:
2481 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2482 }
bdfc028d
TT
2483}
2484
ab0394fe 2485static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
5c50368f
AS
2486{
2487 struct mlx5_core_dev *mdev = priv->mdev;
2488
2489 void *in;
2490 void *tirc;
2491 int inlen;
2492 int err;
ab0394fe 2493 int tt;
1da36696 2494 int ix;
5c50368f
AS
2495
2496 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1b9a07ee 2497 in = kvzalloc(inlen, GFP_KERNEL);
5c50368f
AS
2498 if (!in)
2499 return -ENOMEM;
2500
2501 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2502 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2503
6a9764ef 2504 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
5c50368f 2505
1da36696 2506 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
724b2aa1 2507 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
1da36696 2508 inlen);
ab0394fe 2509 if (err)
1da36696 2510 goto free_in;
ab0394fe 2511 }
5c50368f 2512
6bfd390b 2513 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
1da36696
TT
2514 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2515 in, inlen);
2516 if (err)
2517 goto free_in;
2518 }
2519
2520free_in:
5c50368f
AS
2521 kvfree(in);
2522
2523 return err;
2524}
2525
7b3722fa
GP
2526static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2527 enum mlx5e_traffic_types tt,
2528 u32 *tirc)
2529{
2530 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2531
2532 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2533
2534 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2535 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2536 MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2537
2538 mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2539}
2540
cd255eff 2541static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
40ab6a6e 2542{
40ab6a6e 2543 struct mlx5_core_dev *mdev = priv->mdev;
c139dbfd 2544 u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
40ab6a6e
AS
2545 int err;
2546
cd255eff 2547 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
40ab6a6e
AS
2548 if (err)
2549 return err;
2550
cd255eff
SM
2551 /* Update vport context MTU */
2552 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2553 return 0;
2554}
40ab6a6e 2555
cd255eff
SM
2556static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
2557{
2558 struct mlx5_core_dev *mdev = priv->mdev;
2559 u16 hw_mtu = 0;
2560 int err;
40ab6a6e 2561
cd255eff
SM
2562 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2563 if (err || !hw_mtu) /* fallback to port oper mtu */
2564 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2565
c139dbfd 2566 *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
cd255eff
SM
2567}
2568
2e20a151 2569static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
cd255eff 2570{
2e20a151 2571 struct net_device *netdev = priv->netdev;
cd255eff
SM
2572 u16 mtu;
2573 int err;
2574
2575 err = mlx5e_set_mtu(priv, netdev->mtu);
2576 if (err)
2577 return err;
40ab6a6e 2578
cd255eff
SM
2579 mlx5e_query_mtu(priv, &mtu);
2580 if (mtu != netdev->mtu)
2581 netdev_warn(netdev, "%s: VPort MTU %d is different from netdev mtu %d\n",
2582 __func__, mtu, netdev->mtu);
40ab6a6e 2583
cd255eff 2584 netdev->mtu = mtu;
40ab6a6e
AS
2585 return 0;
2586}
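/* Worked example (illustrative, assuming the usual hard_mtu overhead of
 * ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN = 22 bytes): a netdev MTU of 1500 is
 * programmed to FW as MLX5E_SW2HW_MTU(priv, 1500) = 1522, and the query
 * path converts back with MLX5E_HW2SW_MTU() before comparing.
 */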
2587
08fb1dac
SM
2588static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2589{
2590 struct mlx5e_priv *priv = netdev_priv(netdev);
6a9764ef
SM
2591 int nch = priv->channels.params.num_channels;
2592 int ntc = priv->channels.params.num_tc;
08fb1dac
SM
2593 int tc;
2594
2595 netdev_reset_tc(netdev);
2596
2597 if (ntc == 1)
2598 return;
2599
2600 netdev_set_num_tc(netdev, ntc);
2601
7ccdd084
RS
2602 /* Map all netdev TCs to queue offset 0.
2603 * We use our own UP-to-TXQ mapping for QoS.
2604 */
08fb1dac 2605 for (tc = 0; tc < ntc; tc++)
7ccdd084 2606 netdev_set_tc_queue(netdev, tc, nch, 0);
08fb1dac
SM
2607}
2608
acc6c595
SM
2609static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
2610{
2611 struct mlx5e_channel *c;
2612 struct mlx5e_txqsq *sq;
2613 int i, tc;
2614
2615 for (i = 0; i < priv->channels.num; i++)
2616 for (tc = 0; tc < priv->profile->max_tc; tc++)
2617 priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
2618
2619 for (i = 0; i < priv->channels.num; i++) {
2620 c = priv->channels.c[i];
2621 for (tc = 0; tc < c->num_tc; tc++) {
2622 sq = &c->sq[tc];
2623 priv->txq2sq[sq->txq_ix] = sq;
2624 }
2625 }
2626}
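/* Worked example for the txq layout above (illustrative): with 4 channels
 * and 2 TCs, txq_ix = ix + tc * num_channels gives channel 1 the txqs
 * {1, 5}: TC0 occupies txqs 0..3 and TC1 occupies txqs 4..7. The same
 * formula sets txq_ix when the SQs are opened in mlx5e_open_sqs().
 */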
2627
603f4a45 2628void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
acc6c595 2629{
9008ae07
SM
2630 int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2631 struct net_device *netdev = priv->netdev;
2632
2633 mlx5e_netdev_set_tcs(netdev);
053ee0a7
TR
2634 netif_set_real_num_tx_queues(netdev, num_txqs);
2635 netif_set_real_num_rx_queues(netdev, priv->channels.num);
9008ae07 2636
acc6c595
SM
2637 mlx5e_build_channels_tx_maps(priv);
2638 mlx5e_activate_channels(&priv->channels);
2639 netif_tx_start_all_queues(priv->netdev);
9008ae07 2640
4811bc57 2641 if (MLX5_ESWITCH_MANAGER(priv->mdev))
9008ae07
SM
2642 mlx5e_add_sqs_fwd_rules(priv);
2643
acc6c595 2644 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
9008ae07 2645 mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
acc6c595
SM
2646}
2647
603f4a45 2648void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
acc6c595 2649{
9008ae07
SM
2650 mlx5e_redirect_rqts_to_drop(priv);
2651
4811bc57 2652 if (MLX5_ESWITCH_MANAGER(priv->mdev))
9008ae07
SM
2653 mlx5e_remove_sqs_fwd_rules(priv);
2654
acc6c595
SM
2655 /* FIXME: This is a workaround (W/A) only for the TX timeout watchdog
2656 * false alarm that fires when polling inactive TX queues.
2657 */
2658 netif_tx_stop_all_queues(priv->netdev);
2659 netif_tx_disable(priv->netdev);
2660 mlx5e_deactivate_channels(&priv->channels);
2661}
2662
55c2503d 2663void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2e20a151
SM
2664 struct mlx5e_channels *new_chs,
2665 mlx5e_fp_hw_modify hw_modify)
55c2503d
SM
2666{
2667 struct net_device *netdev = priv->netdev;
2668 int new_num_txqs;
7ca42c80 2669 int carrier_ok;
55c2503d
SM
2670 new_num_txqs = new_chs->num * new_chs->params.num_tc;
2671
7ca42c80 2672 carrier_ok = netif_carrier_ok(netdev);
55c2503d
SM
2673 netif_carrier_off(netdev);
2674
2675 if (new_num_txqs < netdev->real_num_tx_queues)
2676 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2677
2678 mlx5e_deactivate_priv_channels(priv);
2679 mlx5e_close_channels(&priv->channels);
2680
2681 priv->channels = *new_chs;
2682
2e20a151
SM
2683 /* New channels are ready to roll, modify HW settings if needed */
2684 if (hw_modify)
2685 hw_modify(priv);
2686
55c2503d
SM
2687 mlx5e_refresh_tirs(priv, false);
2688 mlx5e_activate_priv_channels(priv);
2689
7ca42c80
ES
2690 /* restore the carrier state if needed */
2691 if (carrier_ok)
2692 netif_carrier_on(netdev);
55c2503d
SM
2693}
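/* Usage sketch (illustrative; mirrors set_feature_lro() further down):
 *
 *	struct mlx5e_channels new_channels = {};
 *
 *	new_channels.params = priv->channels.params;
 *	new_channels.params.lro_en = enable;
 *	err = mlx5e_open_channels(priv, &new_channels);
 *	if (err)
 *		goto out;
 *	mlx5e_switch_priv_channels(priv, &new_channels,
 *				   mlx5e_modify_tirs_lro);
 *
 * The new channels are opened before anything is torn down, so a failure
 * leaves the running configuration untouched; the hw_modify callback runs
 * after the swap, once the new channels are in place.
 */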
2694
237f258c 2695void mlx5e_timestamp_init(struct mlx5e_priv *priv)
7c39afb3
FD
2696{
2697 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2698 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2699}
2700
40ab6a6e
AS
2701int mlx5e_open_locked(struct net_device *netdev)
2702{
2703 struct mlx5e_priv *priv = netdev_priv(netdev);
40ab6a6e
AS
2704 int err;
2705
2706 set_bit(MLX5E_STATE_OPENED, &priv->state);
2707
ff9c852f 2708 err = mlx5e_open_channels(priv, &priv->channels);
acc6c595 2709 if (err)
343b29f3 2710 goto err_clear_state_opened_flag;
40ab6a6e 2711
b676f653 2712 mlx5e_refresh_tirs(priv, false);
acc6c595 2713 mlx5e_activate_priv_channels(priv);
7ca42c80
ES
2714 if (priv->profile->update_carrier)
2715 priv->profile->update_carrier(priv);
be4891af 2716
cb67b832
HHZ
2717 if (priv->profile->update_stats)
2718 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
40ab6a6e 2719
9b37b07f 2720 return 0;
343b29f3
AS
2721
2722err_clear_state_opened_flag:
2723 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2724 return err;
40ab6a6e
AS
2725}
2726
cb67b832 2727int mlx5e_open(struct net_device *netdev)
40ab6a6e
AS
2728{
2729 struct mlx5e_priv *priv = netdev_priv(netdev);
2730 int err;
2731
2732 mutex_lock(&priv->state_lock);
2733 err = mlx5e_open_locked(netdev);
63bfd399
EBE
2734 if (!err)
2735 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
40ab6a6e
AS
2736 mutex_unlock(&priv->state_lock);
2737
c36af623
SK
2738 if (mlx5e_vxlan_allowed(priv->mdev))
2739 udp_tunnel_get_rx_info(netdev);
2740
40ab6a6e
AS
2741 return err;
2742}
2743
2744int mlx5e_close_locked(struct net_device *netdev)
2745{
2746 struct mlx5e_priv *priv = netdev_priv(netdev);
2747
a1985740
AS
2748 /* May already be CLOSED if a previous configuration operation
2749 * (e.g. an RX/TX queue size change) that involves close & open failed.
2750 */
2751 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2752 return 0;
2753
40ab6a6e
AS
2754 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2755
40ab6a6e 2756 netif_carrier_off(priv->netdev);
acc6c595
SM
2757 mlx5e_deactivate_priv_channels(priv);
2758 mlx5e_close_channels(&priv->channels);
40ab6a6e
AS
2759
2760 return 0;
2761}
2762
cb67b832 2763int mlx5e_close(struct net_device *netdev)
40ab6a6e
AS
2764{
2765 struct mlx5e_priv *priv = netdev_priv(netdev);
2766 int err;
2767
26e59d80
MHY
2768 if (!netif_device_present(netdev))
2769 return -ENODEV;
2770
40ab6a6e 2771 mutex_lock(&priv->state_lock);
63bfd399 2772 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
40ab6a6e
AS
2773 err = mlx5e_close_locked(netdev);
2774 mutex_unlock(&priv->state_lock);
2775
2776 return err;
2777}
2778
a43b25da 2779static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3b77235b
SM
2780 struct mlx5e_rq *rq,
2781 struct mlx5e_rq_param *param)
40ab6a6e 2782{
40ab6a6e
AS
2783 void *rqc = param->rqc;
2784 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2785 int err;
2786
2787 param->wq.db_numa_node = param->wq.buf_numa_node;
2788
2789 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2790 &rq->wq_ctrl);
2791 if (err)
2792 return err;
2793
a43b25da 2794 rq->mdev = mdev;
40ab6a6e
AS
2795
2796 return 0;
2797}
2798
a43b25da 2799static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
3b77235b
SM
2800 struct mlx5e_cq *cq,
2801 struct mlx5e_cq_param *param)
40ab6a6e 2802{
0ff59f96
GP
2803 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2804 param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
2805
95b6c6a5 2806 return mlx5e_alloc_cq_common(mdev, param, cq);
40ab6a6e
AS
2807}
2808
a43b25da
SM
2809static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2810 struct mlx5e_rq *drop_rq)
40ab6a6e 2811{
a43b25da
SM
2812 struct mlx5e_cq_param cq_param = {};
2813 struct mlx5e_rq_param rq_param = {};
2814 struct mlx5e_cq *cq = &drop_rq->cq;
40ab6a6e
AS
2815 int err;
2816
0ff59f96 2817 mlx5e_build_drop_rq_param(mdev, &rq_param);
40ab6a6e 2818
a43b25da 2819 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
40ab6a6e
AS
2820 if (err)
2821 return err;
2822
3b77235b 2823 err = mlx5e_create_cq(cq, &cq_param);
40ab6a6e 2824 if (err)
3b77235b 2825 goto err_free_cq;
40ab6a6e 2826
a43b25da 2827 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
40ab6a6e 2828 if (err)
3b77235b 2829 goto err_destroy_cq;
40ab6a6e 2830
a43b25da 2831 err = mlx5e_create_rq(drop_rq, &rq_param);
40ab6a6e 2832 if (err)
3b77235b 2833 goto err_free_rq;
40ab6a6e
AS
2834
2835 return 0;
2836
3b77235b 2837err_free_rq:
a43b25da 2838 mlx5e_free_rq(drop_rq);
40ab6a6e
AS
2839
2840err_destroy_cq:
a43b25da 2841 mlx5e_destroy_cq(cq);
40ab6a6e 2842
3b77235b 2843err_free_cq:
a43b25da 2844 mlx5e_free_cq(cq);
3b77235b 2845
40ab6a6e
AS
2846 return err;
2847}
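/* The drop RQ opened above is never fed with buffers; it only serves as a
 * safe RQN for RQTs while the channels are down (see
 * mlx5e_redirect_rqts_to_drop()) and as the out-of-range target in
 * mlx5e_get_direct_rqn().
 */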
2848
a43b25da 2849static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
40ab6a6e 2850{
a43b25da
SM
2851 mlx5e_destroy_rq(drop_rq);
2852 mlx5e_free_rq(drop_rq);
2853 mlx5e_destroy_cq(&drop_rq->cq);
2854 mlx5e_free_cq(&drop_rq->cq);
40ab6a6e
AS
2855}
2856
5426a0b2
SM
2857int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
2858 u32 underlay_qpn, u32 *tisn)
40ab6a6e 2859{
c4f287c4 2860 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
40ab6a6e
AS
2861 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2862
08fb1dac 2863 MLX5_SET(tisc, tisc, prio, tc << 1);
5426a0b2 2864 MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
b50d292b 2865 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
db60b802
AH
2866
2867 if (mlx5_lag_is_lacp_owner(mdev))
2868 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2869
5426a0b2 2870 return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
40ab6a6e
AS
2871}
2872
5426a0b2 2873void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
40ab6a6e 2874{
5426a0b2 2875 mlx5_core_destroy_tis(mdev, tisn);
40ab6a6e
AS
2876}
2877
cb67b832 2878int mlx5e_create_tises(struct mlx5e_priv *priv)
40ab6a6e
AS
2879{
2880 int err;
2881 int tc;
2882
6bfd390b 2883 for (tc = 0; tc < priv->profile->max_tc; tc++) {
5426a0b2 2884 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
40ab6a6e
AS
2885 if (err)
2886 goto err_close_tises;
2887 }
2888
2889 return 0;
2890
2891err_close_tises:
2892 for (tc--; tc >= 0; tc--)
5426a0b2 2893 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
40ab6a6e
AS
2894
2895 return err;
2896}
2897
cb67b832 2898void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
40ab6a6e
AS
2899{
2900 int tc;
2901
6bfd390b 2902 for (tc = 0; tc < priv->profile->max_tc; tc++)
5426a0b2 2903 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
40ab6a6e
AS
2904}
2905
6a9764ef
SM
2906static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
2907 enum mlx5e_traffic_types tt,
2908 u32 *tirc)
f62b8bb8 2909{
b50d292b 2910 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3191e05f 2911
6a9764ef 2912 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
f62b8bb8 2913
4cbeaff5 2914 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
398f3351 2915 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
7b3722fa 2916 mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
f62b8bb8
AV
2917}
2918
6a9764ef 2919static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
f62b8bb8 2920{
b50d292b 2921 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
1da36696 2922
6a9764ef 2923 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
1da36696
TT
2924
2925 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2926 MLX5_SET(tirc, tirc, indirect_table, rqtn);
2927 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2928}
2929
8f493ffd 2930int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
1da36696 2931{
724b2aa1 2932 struct mlx5e_tir *tir;
f62b8bb8
AV
2933 void *tirc;
2934 int inlen;
7b3722fa 2935 int i = 0;
f62b8bb8 2936 int err;
1da36696 2937 u32 *in;
1da36696 2938 int tt;
f62b8bb8
AV
2939
2940 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1b9a07ee 2941 in = kvzalloc(inlen, GFP_KERNEL);
f62b8bb8
AV
2942 if (!in)
2943 return -ENOMEM;
2944
1da36696
TT
2945 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2946 memset(in, 0, inlen);
724b2aa1 2947 tir = &priv->indir_tir[tt];
1da36696 2948 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
6a9764ef 2949 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
724b2aa1 2950 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
7b3722fa
GP
2951 if (err) {
2952 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
2953 goto err_destroy_inner_tirs;
2954 }
f62b8bb8
AV
2955 }
2956
7b3722fa
GP
2957 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2958 goto out;
2959
2960 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
2961 memset(in, 0, inlen);
2962 tir = &priv->inner_indir_tir[i];
2963 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2964 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
2965 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2966 if (err) {
2967 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
2968 goto err_destroy_inner_tirs;
2969 }
2970 }
2971
2972out:
6bfd390b
HHZ
2973 kvfree(in);
2974
2975 return 0;
2976
7b3722fa
GP
2977err_destroy_inner_tirs:
2978 for (i--; i >= 0; i--)
2979 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
2980
6bfd390b
HHZ
2981 for (tt--; tt >= 0; tt--)
2982 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2983
2984 kvfree(in);
2985
2986 return err;
2987}
2988
cb67b832 2989int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
6bfd390b
HHZ
2990{
2991 int nch = priv->profile->max_nch(priv->mdev);
2992 struct mlx5e_tir *tir;
2993 void *tirc;
2994 int inlen;
2995 int err;
2996 u32 *in;
2997 int ix;
2998
2999 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1b9a07ee 3000 in = kvzalloc(inlen, GFP_KERNEL);
6bfd390b
HHZ
3001 if (!in)
3002 return -ENOMEM;
3003
1da36696
TT
3004 for (ix = 0; ix < nch; ix++) {
3005 memset(in, 0, inlen);
724b2aa1 3006 tir = &priv->direct_tir[ix];
1da36696 3007 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
6a9764ef 3008 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
724b2aa1 3009 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
1da36696
TT
3010 if (err)
3011 goto err_destroy_ch_tirs;
3012 }
3013
3014 kvfree(in);
3015
f62b8bb8
AV
3016 return 0;
3017
1da36696 3018err_destroy_ch_tirs:
8f493ffd 3019 mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
1da36696 3020 for (ix--; ix >= 0; ix--)
724b2aa1 3021 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
1da36696 3022
1da36696 3023 kvfree(in);
f62b8bb8
AV
3024
3025 return err;
3026}
3027
8f493ffd 3028void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
f62b8bb8
AV
3029{
3030 int i;
3031
1da36696 3032 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
724b2aa1 3033 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
7b3722fa
GP
3034
3035 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3036 return;
3037
3038 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3039 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
f62b8bb8
AV
3040}
3041
cb67b832 3042void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
6bfd390b
HHZ
3043{
3044 int nch = priv->profile->max_nch(priv->mdev);
3045 int i;
3046
3047 for (i = 0; i < nch; i++)
3048 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3049}
3050
102722fc
GE
3051static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3052{
3053 int err = 0;
3054 int i;
3055
3056 for (i = 0; i < chs->num; i++) {
3057 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3058 if (err)
3059 return err;
3060 }
3061
3062 return 0;
3063}
3064
f6d96a20 3065static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
36350114
GP
3066{
3067 int err = 0;
3068 int i;
3069
ff9c852f
SM
3070 for (i = 0; i < chs->num; i++) {
3071 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
36350114
GP
3072 if (err)
3073 return err;
3074 }
3075
3076 return 0;
3077}
3078
0cf0f6d3
JP
3079static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3080 struct tc_mqprio_qopt *mqprio)
08fb1dac
SM
3081{
3082 struct mlx5e_priv *priv = netdev_priv(netdev);
6f9485af 3083 struct mlx5e_channels new_channels = {};
0cf0f6d3 3084 u8 tc = mqprio->num_tc;
08fb1dac
SM
3085 int err = 0;
3086
0cf0f6d3
JP
3087 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3088
08fb1dac
SM
3089 if (tc && tc != MLX5E_MAX_NUM_TC)
3090 return -EINVAL;
3091
3092 mutex_lock(&priv->state_lock);
3093
6f9485af
SM
3094 new_channels.params = priv->channels.params;
3095 new_channels.params.num_tc = tc ? tc : 1;
08fb1dac 3096
20b6a1c7 3097 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
6f9485af
SM
3098 priv->channels.params = new_channels.params;
3099 goto out;
3100 }
08fb1dac 3101
6f9485af
SM
3102 err = mlx5e_open_channels(priv, &new_channels);
3103 if (err)
3104 goto out;
08fb1dac 3105
2e20a151 3106 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
6f9485af 3107out:
08fb1dac 3108 mutex_unlock(&priv->state_lock);
08fb1dac
SM
3109 return err;
3110}
3111
e80541ec 3112#ifdef CONFIG_MLX5_ESWITCH
d6c862ba 3113static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
0cf0f6d3 3114 struct tc_cls_flower_offload *cls_flower)
08fb1dac 3115{
d6c862ba 3116 if (cls_flower->common.chain_index)
0cf0f6d3 3117 return -EOPNOTSUPP;
e8f887ac 3118
0cf0f6d3
JP
3119 switch (cls_flower->command) {
3120 case TC_CLSFLOWER_REPLACE:
5fd9fc4e 3121 return mlx5e_configure_flower(priv, cls_flower);
0cf0f6d3
JP
3122 case TC_CLSFLOWER_DESTROY:
3123 return mlx5e_delete_flower(priv, cls_flower);
3124 case TC_CLSFLOWER_STATS:
3125 return mlx5e_stats_flower(priv, cls_flower);
3126 default:
a5fcf8a6 3127 return -EOPNOTSUPP;
0cf0f6d3
JP
3128 }
3129}
d6c862ba
JP
3130
3131int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3132 void *cb_priv)
3133{
3134 struct mlx5e_priv *priv = cb_priv;
3135
44ae12a7
JP
3136 if (!tc_can_offload(priv->netdev))
3137 return -EOPNOTSUPP;
3138
d6c862ba
JP
3139 switch (type) {
3140 case TC_SETUP_CLSFLOWER:
3141 return mlx5e_setup_tc_cls_flower(priv, type_data);
3142 default:
3143 return -EOPNOTSUPP;
3144 }
3145}
3146
3147static int mlx5e_setup_tc_block(struct net_device *dev,
3148 struct tc_block_offload *f)
3149{
3150 struct mlx5e_priv *priv = netdev_priv(dev);
3151
3152 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3153 return -EOPNOTSUPP;
3154
3155 switch (f->command) {
3156 case TC_BLOCK_BIND:
3157 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3158 priv, priv);
3159 case TC_BLOCK_UNBIND:
3160 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3161 priv);
3162 return 0;
3163 default:
3164 return -EOPNOTSUPP;
3165 }
3166}
e80541ec 3167#endif
a5fcf8a6 3168
717503b9
JP
3169int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3170 void *type_data)
0cf0f6d3 3171{
2572ac53 3172 switch (type) {
fde6af47 3173#ifdef CONFIG_MLX5_ESWITCH
d6c862ba
JP
3174 case TC_SETUP_BLOCK:
3175 return mlx5e_setup_tc_block(dev, type_data);
fde6af47 3176#endif
575ed7d3 3177 case TC_SETUP_QDISC_MQPRIO:
de4784ca 3178 return mlx5e_setup_tc_mqprio(dev, type_data);
e8f887ac
AV
3179 default:
3180 return -EOPNOTSUPP;
3181 }
08fb1dac
SM
3182}
3183
bc1f4470 3184static void
f62b8bb8
AV
3185mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3186{
3187 struct mlx5e_priv *priv = netdev_priv(dev);
9218b44d 3188 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
f62b8bb8 3189 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
269e6b3a 3190 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
f62b8bb8 3191
370bad0f
OG
3192 if (mlx5e_is_uplink_rep(priv)) {
3193 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3194 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3195 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3196 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3197 } else {
3198 stats->rx_packets = sstats->rx_packets;
3199 stats->rx_bytes = sstats->rx_bytes;
3200 stats->tx_packets = sstats->tx_packets;
3201 stats->tx_bytes = sstats->tx_bytes;
3202 stats->tx_dropped = sstats->tx_queue_dropped;
3203 }
269e6b3a
GP
3204
3205 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
269e6b3a
GP
3206
3207 stats->rx_length_errors =
9218b44d
GP
3208 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3209 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3210 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
269e6b3a 3211 stats->rx_crc_errors =
9218b44d
GP
3212 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3213 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3214 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
269e6b3a
GP
3215 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3216 stats->rx_frame_errors;
3217 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3218
3219 /* vport multicast also counts packets that are dropped due to steering
3220 * or rx out of buffer
3221 */
9218b44d
GP
3222 stats->multicast =
3223 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
f62b8bb8
AV
3224}
3225
3226static void mlx5e_set_rx_mode(struct net_device *dev)
3227{
3228 struct mlx5e_priv *priv = netdev_priv(dev);
3229
7bb29755 3230 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
3231}
3232
3233static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3234{
3235 struct mlx5e_priv *priv = netdev_priv(netdev);
3236 struct sockaddr *saddr = addr;
3237
3238 if (!is_valid_ether_addr(saddr->sa_data))
3239 return -EADDRNOTAVAIL;
3240
3241 netif_addr_lock_bh(netdev);
3242 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3243 netif_addr_unlock_bh(netdev);
3244
7bb29755 3245 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
3246
3247 return 0;
3248}
3249
75b81ce7 3250#define MLX5E_SET_FEATURE(features, feature, enable) \
0e405443
GP
3251 do { \
3252 if (enable) \
75b81ce7 3253 *features |= feature; \
0e405443 3254 else \
75b81ce7 3255 *features &= ~feature; \
0e405443
GP
3256 } while (0)
3257
3258typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3259
3260static int set_feature_lro(struct net_device *netdev, bool enable)
f62b8bb8
AV
3261{
3262 struct mlx5e_priv *priv = netdev_priv(netdev);
2e20a151
SM
3263 struct mlx5e_channels new_channels = {};
3264 int err = 0;
3265 bool reset;
f62b8bb8
AV
3266
3267 mutex_lock(&priv->state_lock);
f62b8bb8 3268
2e20a151
SM
3269 reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
3270 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3271
2e20a151
SM
3272 new_channels.params = priv->channels.params;
3273 new_channels.params.lro_en = enable;
3274
3275 if (!reset) {
3276 priv->channels.params = new_channels.params;
3277 err = mlx5e_modify_tirs_lro(priv);
3278 goto out;
98e81b0a 3279 }
f62b8bb8 3280
2e20a151
SM
3281 err = mlx5e_open_channels(priv, &new_channels);
3282 if (err)
3283 goto out;
0e405443 3284
2e20a151
SM
3285 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3286out:
9b37b07f 3287 mutex_unlock(&priv->state_lock);
0e405443
GP
3288 return err;
3289}
3290
2b52a283 3291static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
0e405443
GP
3292{
3293 struct mlx5e_priv *priv = netdev_priv(netdev);
3294
3295 if (enable)
2b52a283 3296 mlx5e_enable_cvlan_filter(priv);
0e405443 3297 else
2b52a283 3298 mlx5e_disable_cvlan_filter(priv);
0e405443
GP
3299
3300 return 0;
3301}
3302
d05d1096 3303#ifdef CONFIG_MLX5_ESWITCH
0e405443
GP
3304static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3305{
3306 struct mlx5e_priv *priv = netdev_priv(netdev);
f62b8bb8 3307
0e405443 3308 if (!enable && mlx5e_tc_num_filters(priv)) {
e8f887ac
AV
3309 netdev_err(netdev,
3310 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3311 return -EINVAL;
3312 }
3313
0e405443
GP
3314 return 0;
3315}
d05d1096 3316#endif
0e405443 3317
94cb1ebb
EBE
3318static int set_feature_rx_all(struct net_device *netdev, bool enable)
3319{
3320 struct mlx5e_priv *priv = netdev_priv(netdev);
3321 struct mlx5_core_dev *mdev = priv->mdev;
3322
3323 return mlx5_set_port_fcs(mdev, !enable);
3324}
3325
102722fc
GE
3326static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3327{
3328 struct mlx5e_priv *priv = netdev_priv(netdev);
3329 int err;
3330
3331 mutex_lock(&priv->state_lock);
3332
3333 priv->channels.params.scatter_fcs_en = enable;
3334 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3335 if (err)
3336 priv->channels.params.scatter_fcs_en = !enable;
3337
3338 mutex_unlock(&priv->state_lock);
3339
3340 return err;
3341}
3342
36350114
GP
3343static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3344{
3345 struct mlx5e_priv *priv = netdev_priv(netdev);
ff9c852f 3346 int err = 0;
36350114
GP
3347
3348 mutex_lock(&priv->state_lock);
3349
6a9764ef 3350 priv->channels.params.vlan_strip_disable = !enable;
ff9c852f
SM
3351 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3352 goto unlock;
3353
3354 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
36350114 3355 if (err)
6a9764ef 3356 priv->channels.params.vlan_strip_disable = enable;
36350114 3357
ff9c852f 3358unlock:
36350114
GP
3359 mutex_unlock(&priv->state_lock);
3360
3361 return err;
3362}
3363
45bf454a
MG
3364#ifdef CONFIG_RFS_ACCEL
3365static int set_feature_arfs(struct net_device *netdev, bool enable)
3366{
3367 struct mlx5e_priv *priv = netdev_priv(netdev);
3368 int err;
3369
3370 if (enable)
3371 err = mlx5e_arfs_enable(priv);
3372 else
3373 err = mlx5e_arfs_disable(priv);
3374
3375 return err;
3376}
3377#endif
3378
0e405443 3379static int mlx5e_handle_feature(struct net_device *netdev,
75b81ce7 3380 netdev_features_t *features,
0e405443
GP
3381 netdev_features_t wanted_features,
3382 netdev_features_t feature,
3383 mlx5e_feature_handler feature_handler)
3384{
3385 netdev_features_t changes = wanted_features ^ netdev->features;
3386 bool enable = !!(wanted_features & feature);
3387 int err;
3388
3389 if (!(changes & feature))
3390 return 0;
3391
3392 err = feature_handler(netdev, enable);
3393 if (err) {
b20eab15
GP
3394 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3395 enable ? "Enable" : "Disable", &feature, err);
0e405443
GP
3396 return err;
3397 }
3398
75b81ce7 3399 MLX5E_SET_FEATURE(features, feature, enable);
0e405443
GP
3400 return 0;
3401}
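/* Example of the dispatch below (illustrative; taken from
 * mlx5e_set_features()):
 *
 *	err |= mlx5e_handle_feature(netdev, &oper_features, features,
 *				    NETIF_F_RXFCS, set_feature_rx_fcs);
 *
 * The handler only runs when its bit actually changed, and oper_features
 * accumulates the bits that were applied successfully.
 */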
3402
3403static int mlx5e_set_features(struct net_device *netdev,
3404 netdev_features_t features)
3405{
75b81ce7 3406 netdev_features_t oper_features = netdev->features;
0e405443
GP
3407 int err;
3408
75b81ce7
GP
3409 err = mlx5e_handle_feature(netdev, &oper_features, features,
3410 NETIF_F_LRO, set_feature_lro);
3411 err |= mlx5e_handle_feature(netdev, &oper_features, features,
0e405443 3412 NETIF_F_HW_VLAN_CTAG_FILTER,
2b52a283 3413 set_feature_cvlan_filter);
d05d1096 3414#ifdef CONFIG_MLX5_ESWITCH
75b81ce7
GP
3415 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3416 NETIF_F_HW_TC, set_feature_tc_num_filters);
d05d1096 3417#endif
75b81ce7
GP
3418 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3419 NETIF_F_RXALL, set_feature_rx_all);
3420 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3421 NETIF_F_RXFCS, set_feature_rx_fcs);
3422 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3423 NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
45bf454a 3424#ifdef CONFIG_RFS_ACCEL
75b81ce7
GP
3425 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3426 NETIF_F_NTUPLE, set_feature_arfs);
45bf454a 3427#endif
0e405443 3428
75b81ce7
GP
3429 if (err) {
3430 netdev->features = oper_features;
3431 return -EINVAL;
3432 }
3433
3434 return 0;
f62b8bb8
AV
3435}
3436
7d92d580
GP
3437static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3438 netdev_features_t features)
3439{
3440 struct mlx5e_priv *priv = netdev_priv(netdev);
3441
3442 mutex_lock(&priv->state_lock);
3443 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3444 /* HW strips the outer C-tag header, which is a problem
3445 * for S-tag traffic.
3446 */
3447 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3448 if (!priv->channels.params.vlan_strip_disable)
3449 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3450 }
3451 mutex_unlock(&priv->state_lock);
3452
3453 return features;
3454}
3455
f62b8bb8
AV
3456static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
3457{
3458 struct mlx5e_priv *priv = netdev_priv(netdev);
2e20a151
SM
3459 struct mlx5e_channels new_channels = {};
3460 int curr_mtu;
98e81b0a 3461 int err = 0;
506753b0 3462 bool reset;
f62b8bb8 3463
f62b8bb8 3464 mutex_lock(&priv->state_lock);
98e81b0a 3465
6a9764ef
SM
3466 reset = !priv->channels.params.lro_en &&
3467 (priv->channels.params.rq_wq_type !=
506753b0
TT
3468 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
3469
2e20a151 3470 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
98e81b0a 3471
2e20a151 3472 curr_mtu = netdev->mtu;
f62b8bb8 3473 netdev->mtu = new_mtu;
98e81b0a 3474
2e20a151
SM
3475 if (!reset) {
3476 mlx5e_set_dev_port_mtu(priv);
3477 goto out;
3478 }
98e81b0a 3479
2e20a151
SM
3480 new_channels.params = priv->channels.params;
3481 err = mlx5e_open_channels(priv, &new_channels);
3482 if (err) {
3483 netdev->mtu = curr_mtu;
3484 goto out;
3485 }
3486
3487 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
f62b8bb8 3488
2e20a151
SM
3489out:
3490 mutex_unlock(&priv->state_lock);
f62b8bb8
AV
3491 return err;
3492}
3493
7c39afb3
FD
3494int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3495{
3496 struct hwtstamp_config config;
3497 int err;
3498
3499 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3500 return -EOPNOTSUPP;
3501
3502 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3503 return -EFAULT;
3504
3505 /* TX HW timestamp */
3506 switch (config.tx_type) {
3507 case HWTSTAMP_TX_OFF:
3508 case HWTSTAMP_TX_ON:
3509 break;
3510 default:
3511 return -ERANGE;
3512 }
3513
3514 mutex_lock(&priv->state_lock);
3515 /* RX HW timestamp */
3516 switch (config.rx_filter) {
3517 case HWTSTAMP_FILTER_NONE:
3518 /* Reset CQE compression to Admin default */
3519 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3520 break;
3521 case HWTSTAMP_FILTER_ALL:
3522 case HWTSTAMP_FILTER_SOME:
3523 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3524 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3525 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3526 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3527 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3528 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3529 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3530 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3531 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3532 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3533 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3534 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3535 case HWTSTAMP_FILTER_NTP_ALL:
3536 /* Disable CQE compression */
3537 netdev_warn(priv->netdev, "Disabling cqe compression");
3538 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3539 if (err) {
3540 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3541 mutex_unlock(&priv->state_lock);
3542 return err;
3543 }
3544 config.rx_filter = HWTSTAMP_FILTER_ALL;
3545 break;
3546 default:
3547 mutex_unlock(&priv->state_lock);
3548 return -ERANGE;
3549 }
3550
3551 memcpy(&priv->tstamp, &config, sizeof(config));
3552 mutex_unlock(&priv->state_lock);
3553
3554 return copy_to_user(ifr->ifr_data, &config,
3555 sizeof(config)) ? -EFAULT : 0;
3556}
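/* Illustrative userspace path into this handler (a sketch only; error
 * handling omitted, socket and interface names hypothetical):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = {};
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (char *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any non-NONE RX filter is upgraded to HWTSTAMP_FILTER_ALL and CQE
 * compression is turned off, since compressed CQEs do not carry
 * per-packet timestamps.
 */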
3557
3558int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3559{
3560 struct hwtstamp_config *cfg = &priv->tstamp;
3561
3562 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3563 return -EOPNOTSUPP;
3564
3565 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3566}
3567
ef9814de
EBE
3568static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3569{
1170fbd8
FD
3570 struct mlx5e_priv *priv = netdev_priv(dev);
3571
ef9814de
EBE
3572 switch (cmd) {
3573 case SIOCSHWTSTAMP:
1170fbd8 3574 return mlx5e_hwstamp_set(priv, ifr);
ef9814de 3575 case SIOCGHWTSTAMP:
1170fbd8 3576 return mlx5e_hwstamp_get(priv, ifr);
ef9814de
EBE
3577 default:
3578 return -EOPNOTSUPP;
3579 }
3580}
3581
e80541ec 3582#ifdef CONFIG_MLX5_ESWITCH
66e49ded
SM
3583static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3584{
3585 struct mlx5e_priv *priv = netdev_priv(dev);
3586 struct mlx5_core_dev *mdev = priv->mdev;
3587
3588 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3589}
3590
79aab093
MS
3591static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3592 __be16 vlan_proto)
66e49ded
SM
3593{
3594 struct mlx5e_priv *priv = netdev_priv(dev);
3595 struct mlx5_core_dev *mdev = priv->mdev;
3596
79aab093
MS
3597 if (vlan_proto != htons(ETH_P_8021Q))
3598 return -EPROTONOSUPPORT;
3599
66e49ded
SM
3600 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3601 vlan, qos);
3602}
3603
f942380c
MHY
3604static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3605{
3606 struct mlx5e_priv *priv = netdev_priv(dev);
3607 struct mlx5_core_dev *mdev = priv->mdev;
3608
3609 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3610}
3611
1edc57e2
MHY
3612static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3613{
3614 struct mlx5e_priv *priv = netdev_priv(dev);
3615 struct mlx5_core_dev *mdev = priv->mdev;
3616
3617 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3618}
bd77bf1c
MHY
3619
3620static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3621 int max_tx_rate)
3622{
3623 struct mlx5e_priv *priv = netdev_priv(dev);
3624 struct mlx5_core_dev *mdev = priv->mdev;
3625
bd77bf1c 3626 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
c9497c98 3627 max_tx_rate, min_tx_rate);
bd77bf1c
MHY
3628}
3629
66e49ded
SM
3630static int mlx5_vport_link2ifla(u8 esw_link)
3631{
3632 switch (esw_link) {
3633 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3634 return IFLA_VF_LINK_STATE_DISABLE;
3635 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3636 return IFLA_VF_LINK_STATE_ENABLE;
3637 }
3638 return IFLA_VF_LINK_STATE_AUTO;
3639}
3640
3641static int mlx5_ifla_link2vport(u8 ifla_link)
3642{
3643 switch (ifla_link) {
3644 case IFLA_VF_LINK_STATE_DISABLE:
3645 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3646 case IFLA_VF_LINK_STATE_ENABLE:
3647 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3648 }
3649 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3650}
3651
3652static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3653 int link_state)
3654{
3655 struct mlx5e_priv *priv = netdev_priv(dev);
3656 struct mlx5_core_dev *mdev = priv->mdev;
3657
3658 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3659 mlx5_ifla_link2vport(link_state));
3660}
3661
3662static int mlx5e_get_vf_config(struct net_device *dev,
3663 int vf, struct ifla_vf_info *ivi)
3664{
3665 struct mlx5e_priv *priv = netdev_priv(dev);
3666 struct mlx5_core_dev *mdev = priv->mdev;
3667 int err;
3668
3669 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3670 if (err)
3671 return err;
3672 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3673 return 0;
3674}
3675
3676static int mlx5e_get_vf_stats(struct net_device *dev,
3677 int vf, struct ifla_vf_stats *vf_stats)
3678{
3679 struct mlx5e_priv *priv = netdev_priv(dev);
3680 struct mlx5_core_dev *mdev = priv->mdev;
3681
3682 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3683 vf_stats);
3684}
e80541ec 3685#endif
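/* All SRIOV NDOs above address e-switch vports as "vf + 1": vport 0 is the
 * PF itself, so VF n maps to vport n + 1. An illustrative admin-side
 * trigger (device name hypothetical):
 *
 *   ip link set eth0 vf 0 mac 00:11:22:33:44:55   # operates on vport 1
 */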
66e49ded 3686
1ad9a00a
PB
3687static void mlx5e_add_vxlan_port(struct net_device *netdev,
3688 struct udp_tunnel_info *ti)
b3f63c3d
MF
3689{
3690 struct mlx5e_priv *priv = netdev_priv(netdev);
3691
974c3f30
AD
3692 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3693 return;
3694
b3f63c3d
MF
3695 if (!mlx5e_vxlan_allowed(priv->mdev))
3696 return;
3697
974c3f30 3698 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
b3f63c3d
MF
3699}
3700
1ad9a00a
PB
3701static void mlx5e_del_vxlan_port(struct net_device *netdev,
3702 struct udp_tunnel_info *ti)
b3f63c3d
MF
3703{
3704 struct mlx5e_priv *priv = netdev_priv(netdev);
3705
974c3f30
AD
3706 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3707 return;
3708
b3f63c3d
MF
3709 if (!mlx5e_vxlan_allowed(priv->mdev))
3710 return;
3711
974c3f30 3712 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
b3f63c3d
MF
3713}
3714
27299841
GP
3715static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3716 struct sk_buff *skb,
3717 netdev_features_t features)
b3f63c3d 3718{
2989ad1e 3719 unsigned int offset = 0;
b3f63c3d 3720 struct udphdr *udph;
27299841
GP
3721 u8 proto;
3722 u16 port;
b3f63c3d
MF
3723
3724 switch (vlan_get_protocol(skb)) {
3725 case htons(ETH_P_IP):
3726 proto = ip_hdr(skb)->protocol;
3727 break;
3728 case htons(ETH_P_IPV6):
2989ad1e 3729 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
b3f63c3d
MF
3730 break;
3731 default:
3732 goto out;
3733 }
3734
27299841
GP
3735 switch (proto) {
3736 case IPPROTO_GRE:
3737 return features;
3738 case IPPROTO_UDP:
b3f63c3d
MF
3739 udph = udp_hdr(skb);
3740 port = be16_to_cpu(udph->dest);
b3f63c3d 3741
27299841
GP
3742 /* Check whether the UDP dst port is offloaded by the HW */
3743 if (mlx5e_vxlan_lookup_port(priv, port))
3744 return features;
3745 }
b3f63c3d
MF
3746
3747out:
3748 /* Disable CSUM and GSO if the UDP dport is not offloaded by HW */
3749 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3750}
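/* Fallback semantics: when the inner protocol is neither GRE nor a UDP
 * port known to the VXLAN table, clearing NETIF_F_CSUM_MASK and
 * NETIF_F_GSO_MASK makes the stack checksum and segment the skb in
 * software before handing it to the driver.
 */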
3751
3752static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3753 struct net_device *netdev,
3754 netdev_features_t features)
3755{
3756 struct mlx5e_priv *priv = netdev_priv(netdev);
3757
3758 features = vlan_features_check(skb, features);
3759 features = vxlan_features_check(skb, features);
3760
2ac9cfe7
IT
3761#ifdef CONFIG_MLX5_EN_IPSEC
3762 if (mlx5e_ipsec_feature_check(skb, netdev, features))
3763 return features;
3764#endif
3765
b3f63c3d
MF
3766 /* Check whether the tunneled packet is offloaded by the HW */
3767 if (skb->encapsulation &&
3768 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
27299841 3769 return mlx5e_tunnel_features_check(priv, skb, features);
b3f63c3d
MF
3770
3771 return features;
3772}
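/* Ordering note: the generic VLAN/VXLAN length checks run first, the IPsec
 * hook (when built in) may short-circuit the decision, and the tunnel-port
 * lookup only runs for encapsulated skbs that still request CSUM/GSO
 * offload.
 */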
3773
3947ca18
DJ
3774static void mlx5e_tx_timeout(struct net_device *dev)
3775{
3776 struct mlx5e_priv *priv = netdev_priv(dev);
3777 bool sched_work = false;
3778 int i;
3779
3780 netdev_err(dev, "TX timeout detected\n");
3781
6a9764ef 3782 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
acc6c595 3783 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3947ca18 3784
2c1ccc99 3785 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
3947ca18
DJ
3786 continue;
3787 sched_work = true;
c0f1147d 3788 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
3947ca18
DJ
3789 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
3790 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
3791 }
3792
3793 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
3794 schedule_work(&priv->tx_timeout_work);
3795}
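/* ndo_tx_timeout runs in the netdev watchdog context, so the stalled SQ is
 * only flagged here (MLX5E_SQ_STATE_ENABLED cleared); the actual recovery
 * happens later from priv->tx_timeout_work, where sleeping is allowed.
 */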
3796
86994156
RS
3797static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3798{
3799 struct mlx5e_priv *priv = netdev_priv(netdev);
3800 struct bpf_prog *old_prog;
3801 int err = 0;
3802 bool reset, was_opened;
3803 int i;
3804
3805 mutex_lock(&priv->state_lock);
3806
3807 if ((netdev->features & NETIF_F_LRO) && prog) {
3808 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3809 err = -EINVAL;
3810 goto unlock;
3811 }
3812
547eede0
IT
3813 if ((netdev->features & NETIF_F_HW_ESP) && prog) {
3814 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
3815 err = -EINVAL;
3816 goto unlock;
3817 }
3818
86994156
RS
3819 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3820 /* no need for full reset when exchanging programs */
6a9764ef 3821 reset = (!priv->channels.params.xdp_prog || !prog);
86994156
RS
3822
3823 if (was_opened && reset)
3824 mlx5e_close_locked(netdev);
c54c0629
DB
3825 if (was_opened && !reset) {
3826 /* num_channels is invariant here, so we can take the
3827 * batched reference right upfront.
3828 */
6a9764ef 3829 prog = bpf_prog_add(prog, priv->channels.num);
c54c0629
DB
3830 if (IS_ERR(prog)) {
3831 err = PTR_ERR(prog);
3832 goto unlock;
3833 }
3834 }
86994156 3835
c54c0629
DB
3836 /* Exchange programs; we keep the extra prog reference we got from the
3837 * caller as long as we don't fail from this point onwards.
3838 */
6a9764ef 3839 old_prog = xchg(&priv->channels.params.xdp_prog, prog);
86994156
RS
3840 if (old_prog)
3841 bpf_prog_put(old_prog);
3842
3843 if (reset) /* change RQ type according to priv->xdp_prog */
6a9764ef 3844 mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
86994156
RS
3845
3846 if (was_opened && reset)
3847 mlx5e_open_locked(netdev);
3848
3849 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3850 goto unlock;
3851
3852 /* When exchanging programs without a reset, we update the ref counts
3853 * on behalf of the channels' RQs here.
3854 */
ff9c852f
SM
3855 for (i = 0; i < priv->channels.num; i++) {
3856 struct mlx5e_channel *c = priv->channels.c[i];
86994156 3857
c0f1147d 3858 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
3859 napi_synchronize(&c->napi);
3860 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3861
3862 old_prog = xchg(&c->rq.xdp_prog, prog);
3863
c0f1147d 3864 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156 3865 /* napi_schedule in case we have missed anything */
86994156
RS
3866 napi_schedule(&c->napi);
3867
3868 if (old_prog)
3869 bpf_prog_put(old_prog);
3870 }
3871
3872unlock:
3873 mutex_unlock(&priv->state_lock);
3874 return err;
3875}
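/* Two update paths above: attaching a first program or detaching the last
 * one changes the RQ type and needs a full close/open cycle when the
 * device is open, while replacing one program with another swaps
 * rq->xdp_prog live per channel under napi_synchronize(). A minimal load
 * sketch (object and section names hypothetical):
 *
 *   ip link set dev eth0 xdp obj prog.o sec xdp
 */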
3876
821b2e29 3877static u32 mlx5e_xdp_query(struct net_device *dev)
86994156
RS
3878{
3879 struct mlx5e_priv *priv = netdev_priv(dev);
821b2e29
MKL
3880 const struct bpf_prog *xdp_prog;
3881 u32 prog_id = 0;
86994156 3882
821b2e29
MKL
3883 mutex_lock(&priv->state_lock);
3884 xdp_prog = priv->channels.params.xdp_prog;
3885 if (xdp_prog)
3886 prog_id = xdp_prog->aux->id;
3887 mutex_unlock(&priv->state_lock);
3888
3889 return prog_id;
86994156
RS
3890}
3891
f4e63525 3892static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
86994156
RS
3893{
3894 switch (xdp->command) {
3895 case XDP_SETUP_PROG:
3896 return mlx5e_xdp_set(dev, xdp->prog);
3897 case XDP_QUERY_PROG:
821b2e29
MKL
3898 xdp->prog_id = mlx5e_xdp_query(dev);
3899 xdp->prog_attached = !!xdp->prog_id;
86994156
RS
3900 return 0;
3901 default:
3902 return -EINVAL;
3903 }
3904}
3905
80378384
CO
3906#ifdef CONFIG_NET_POLL_CONTROLLER
3907/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
3908 * re-enabling interrupts.
3909 */
3910static void mlx5e_netpoll(struct net_device *dev)
3911{
3912 struct mlx5e_priv *priv = netdev_priv(dev);
ff9c852f
SM
3913 struct mlx5e_channels *chs = &priv->channels;
3914
80378384
CO
3915 int i;
3916
ff9c852f
SM
3917 for (i = 0; i < chs->num; i++)
3918 napi_schedule(&chs->c[i]->napi);
80378384
CO
3919}
3920#endif
3921
e80541ec 3922static const struct net_device_ops mlx5e_netdev_ops = {
f62b8bb8
AV
3923 .ndo_open = mlx5e_open,
3924 .ndo_stop = mlx5e_close,
3925 .ndo_start_xmit = mlx5e_xmit,
0cf0f6d3 3926 .ndo_setup_tc = mlx5e_setup_tc,
08fb1dac 3927 .ndo_select_queue = mlx5e_select_queue,
f62b8bb8
AV
3928 .ndo_get_stats64 = mlx5e_get_stats,
3929 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3930 .ndo_set_mac_address = mlx5e_set_mac,
b0eed40e
SM
3931 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3932 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
f62b8bb8 3933 .ndo_set_features = mlx5e_set_features,
7d92d580 3934 .ndo_fix_features = mlx5e_fix_features,
b0eed40e
SM
3935 .ndo_change_mtu = mlx5e_change_mtu,
3936 .ndo_do_ioctl = mlx5e_ioctl,
507f0c81 3937 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
706b3583
SM
3938 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
3939 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
3940 .ndo_features_check = mlx5e_features_check,
45bf454a
MG
3941#ifdef CONFIG_RFS_ACCEL
3942 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3943#endif
3947ca18 3944 .ndo_tx_timeout = mlx5e_tx_timeout,
f4e63525 3945 .ndo_bpf = mlx5e_xdp,
80378384
CO
3946#ifdef CONFIG_NET_POLL_CONTROLLER
3947 .ndo_poll_controller = mlx5e_netpoll,
3948#endif
e80541ec 3949#ifdef CONFIG_MLX5_ESWITCH
706b3583 3950 /* SRIOV E-Switch NDOs */
b0eed40e
SM
3951 .ndo_set_vf_mac = mlx5e_set_vf_mac,
3952 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
f942380c 3953 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
1edc57e2 3954 .ndo_set_vf_trust = mlx5e_set_vf_trust,
bd77bf1c 3955 .ndo_set_vf_rate = mlx5e_set_vf_rate,
b0eed40e
SM
3956 .ndo_get_vf_config = mlx5e_get_vf_config,
3957 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
3958 .ndo_get_vf_stats = mlx5e_get_vf_stats,
370bad0f
OG
3959 .ndo_has_offload_stats = mlx5e_has_offload_stats,
3960 .ndo_get_offload_stats = mlx5e_get_offload_stats,
e80541ec 3961#endif
f62b8bb8
AV
3962};
3963
3964static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3965{
3966 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
9eb78923 3967 return -EOPNOTSUPP;
f62b8bb8
AV
3968 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3969 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3970 !MLX5_CAP_ETH(mdev, csum_cap) ||
3971 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3972 !MLX5_CAP_ETH(mdev, vlan_cap) ||
796a27ec
GP
3973 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3974 MLX5_CAP_FLOWTABLE(mdev,
3975 flow_table_properties_nic_receive.max_ft_level)
3976 < 3) {
f62b8bb8
AV
3977 mlx5_core_warn(mdev,
3978 "Not creating net device, some required device capabilities are missing\n");
9eb78923 3979 return -EOPNOTSUPP;
f62b8bb8 3980 }
66189961
TT
3981 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3982 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
7524a5d8 3983 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3e432ab6 3984 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
66189961 3985
f62b8bb8
AV
3986 return 0;
3987}
3988
58d52291
AS
3989u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3990{
3991 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3992
3993 return bf_buf_size -
3994 sizeof(struct mlx5e_tx_wqe) +
3995 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3996}
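/* bf_buf_size is half the BlueFlame register because the register is
 * double-buffered for alternating writes; the "+ 2" re-adds
 * inline_hdr_start, which is counted inside struct mlx5e_tx_wqe but can
 * itself hold inline data.
 */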
3997
d4b6c488 3998void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
85082dba
TT
3999 int num_channels)
4000{
4001 int i;
4002
4003 for (i = 0; i < len; i++)
4004 indirection_rqt[i] = i % num_channels;
4005}
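/* Round-robin spread: with len = 8 and num_channels = 3 the table becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, mapping RSS hash buckets evenly onto the
 * available channels.
 */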
4006
b797a684
SM
4007static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
4008{
4009 enum pcie_link_width width;
4010 enum pci_bus_speed speed;
4011 int err = 0;
4012
4013 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
4014 if (err)
4015 return err;
4016
4017 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
4018 return -EINVAL;
4019
4020 switch (speed) {
4021 case PCIE_SPEED_2_5GT:
4022 *pci_bw = 2500 * width;
4023 break;
4024 case PCIE_SPEED_5_0GT:
4025 *pci_bw = 5000 * width;
4026 break;
4027 case PCIE_SPEED_8_0GT:
4028 *pci_bw = 8000 * width;
4029 break;
4030 default:
4031 return -EINVAL;
4032 }
4033
4034 return 0;
4035}
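/* The products above are per-lane transfer rates in Mb/s times the lane
 * count, ignoring 8b/10b (Gen1/2) and 128b/130b (Gen3) encoding overhead;
 * e.g. a Gen3 x8 link yields 8000 * 8 = 64000, comparable against the
 * port link speed in Mb/s used by the heuristics below.
 */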
4036
4037static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
4038{
4039 return (link_speed && pci_bw &&
4040 (pci_bw < 40000) && (pci_bw < link_speed));
4041}
4042
0f6e4cf6
EBE
4043static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
4044{
4045 return !(link_speed && pci_bw &&
4046 (pci_bw <= 16000) && (pci_bw < link_speed));
4047}
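/* Taken together: CQE compression is only worth enabling when a sub-40Gb/s
 * PCIe link is the bottleneck relative to the port speed, while HW LRO is
 * skipped on narrow (<= 16 Gb/s) PCIe-bound setups where compression is
 * preferred instead.
 */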
4048
0088cbbc
TG
4049void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4050{
4051 params->tx_cq_moderation.cq_period_mode = cq_period_mode;
4052
4053 params->tx_cq_moderation.pkts =
4054 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4055 params->tx_cq_moderation.usec =
4056 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4057
4058 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4059 params->tx_cq_moderation.usec =
4060 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4061
4062 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4063 params->tx_cq_moderation.cq_period_mode ==
4064 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4065}
4066
9908aa29
TT
4067void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4068{
0088cbbc 4069 params->rx_cq_moderation.cq_period_mode = cq_period_mode;
9908aa29
TT
4070
4071 params->rx_cq_moderation.pkts =
4072 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4073 params->rx_cq_moderation.usec =
0088cbbc 4074 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
9908aa29
TT
4075
4076 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4077 params->rx_cq_moderation.usec =
4078 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
6a9764ef 4079
457fcd8a
SM
4080 if (params->rx_am_enabled)
4081 params->rx_cq_moderation =
0088cbbc 4082 mlx5e_am_get_def_profile(cq_period_mode);
457fcd8a 4083
6a9764ef 4084 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
0088cbbc
TG
4085 params->rx_cq_moderation.cq_period_mode ==
4086 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
9908aa29
TT
4087}
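/* EQE-based moderation restarts the timer on any event queue entry, while
 * CQE-based moderation restarts it on every completion and therefore
 * tracks bursty traffic more closely; that is why a separate
 * *_USEC_FROM_CQE default is applied above.
 */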
4088
2b029556
SM
4089u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4090{
4091 int i;
4092
4093 /* The supported periods are organized in ascending order */
4094 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4095 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4096 break;
4097
4098 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4099}
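/* The loop picks the smallest supported period that is >= wanted_timeout;
 * if none qualifies, the search stops at the last (largest) entry, which
 * is then returned.
 */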
4100
8f493ffd
SM
4101void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4102 struct mlx5e_params *params,
4103 u16 max_channels)
f62b8bb8 4104{
369eb4ac 4105 u8 rx_cq_period_mode;
b797a684
SM
4106 u32 link_speed = 0;
4107 u32 pci_bw = 0;
2fc4bfb7 4108
6a9764ef
SM
4109 params->num_channels = max_channels;
4110 params->num_tc = 1;
2b029556 4111
0f6e4cf6
EBE
4112 mlx5e_get_max_linkspeed(mdev, &link_speed);
4113 mlx5e_get_pci_bw(mdev, &pci_bw);
4114 mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
4115 link_speed, pci_bw);
4116
6a9764ef
SM
4117 /* SQ */
4118 params->log_sq_size = is_kdump_kernel() ?
b4e029da
KH
4119 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4120 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
461017cb 4121
b797a684 4122 /* set CQE compression */
6a9764ef 4123 params->rx_cqe_compress_def = false;
b797a684 4124 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
e53eef63 4125 MLX5_CAP_GEN(mdev, vport_group_manager))
6a9764ef 4126 params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
0f6e4cf6 4127
6a9764ef 4128 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
a358fcc4 4129 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
6a9764ef
SM
4130
4131 /* RQ */
4132 mlx5e_set_rq_params(mdev, params);
b797a684 4133
6a9764ef 4134 /* HW LRO */
c139dbfd 4135
5426a0b2 4136 /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
6a9764ef 4137 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
0f6e4cf6 4138 params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
6a9764ef 4139 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
b0d4660b 4140
6a9764ef 4141 /* CQ moderation params */
369eb4ac 4142 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
6a9764ef
SM
4143 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4144 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4145 params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
369eb4ac
TG
4146 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4147 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
9908aa29 4148
6a9764ef
SM
4149 /* TX inline */
4150 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
fbcb127e 4151 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
a6f402e4 4152
6a9764ef
SM
4153 /* RSS */
4154 params->rss_hfunc = ETH_RSS_HASH_XOR;
4155 netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
d4b6c488 4156 mlx5e_build_default_indir_rqt(params->indirection_rqt,
6a9764ef
SM
4157 MLX5E_INDIR_RQT_SIZE, max_channels);
4158}
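/* The defaults chosen above are heuristics, not fixed policy: CQE
 * compression and HW LRO both key off the reported PCIe-vs-link bandwidth
 * gap, and both remain overridable at runtime via ethtool private flags
 * and feature bits.
 */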
f62b8bb8 4159
6a9764ef
SM
4160static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4161 struct net_device *netdev,
4162 const struct mlx5e_profile *profile,
4163 void *ppriv)
4164{
4165 struct mlx5e_priv *priv = netdev_priv(netdev);
57afead5 4166
6a9764ef
SM
4167 priv->mdev = mdev;
4168 priv->netdev = netdev;
4169 priv->profile = profile;
4170 priv->ppriv = ppriv;
79c48764 4171 priv->msglevel = MLX5E_MSG_LEVEL;
c139dbfd 4172 priv->hard_mtu = MLX5E_ETH_HARD_MTU;
2d75b2bc 4173
6a9764ef 4174 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
9908aa29 4175
f62b8bb8
AV
4176 mutex_init(&priv->state_lock);
4177
4178 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4179 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3947ca18 4180 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
f62b8bb8 4181 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
237f258c
FD
4182
4183 mlx5e_timestamp_init(priv);
f62b8bb8
AV
4184}
4185
4186static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4187{
4188 struct mlx5e_priv *priv = netdev_priv(netdev);
4189
e1d7d349 4190 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
108805fc
SM
4191 if (is_zero_ether_addr(netdev->dev_addr) &&
4192 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4193 eth_hw_addr_random(netdev);
4194 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4195 }
f62b8bb8
AV
4196}
4197
73561b4b 4198#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
cb67b832
HHZ
4199static const struct switchdev_ops mlx5e_switchdev_ops = {
4200 .switchdev_port_attr_get = mlx5e_attr_get,
4201};
e80541ec 4202#endif
cb67b832 4203
6bfd390b 4204static void mlx5e_build_nic_netdev(struct net_device *netdev)
f62b8bb8
AV
4205{
4206 struct mlx5e_priv *priv = netdev_priv(netdev);
4207 struct mlx5_core_dev *mdev = priv->mdev;
94cb1ebb
EBE
4208 bool fcs_supported;
4209 bool fcs_enabled;
f62b8bb8
AV
4210
4211 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4212
e80541ec
SM
4213 netdev->netdev_ops = &mlx5e_netdev_ops;
4214
08fb1dac 4215#ifdef CONFIG_MLX5_CORE_EN_DCB
e80541ec
SM
4216 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4217 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
08fb1dac 4218#endif
66e49ded 4219
f62b8bb8
AV
4220 netdev->watchdog_timeo = 15 * HZ;
4221
4222 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4223
12be4b21 4224 netdev->vlan_features |= NETIF_F_SG;
f62b8bb8
AV
4225 netdev->vlan_features |= NETIF_F_IP_CSUM;
4226 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
4227 netdev->vlan_features |= NETIF_F_GRO;
4228 netdev->vlan_features |= NETIF_F_TSO;
4229 netdev->vlan_features |= NETIF_F_TSO6;
4230 netdev->vlan_features |= NETIF_F_RXCSUM;
4231 netdev->vlan_features |= NETIF_F_RXHASH;
4232
4233 if (!!MLX5_CAP_ETH(mdev, lro_cap))
4234 netdev->vlan_features |= NETIF_F_LRO;
4235
4236 netdev->hw_features = netdev->vlan_features;
e4cf27bd 4237 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
f62b8bb8
AV
4238 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4239 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4382c7b9 4240 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
f62b8bb8 4241
27299841
GP
4242 if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4243 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
b3f63c3d 4244 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
f3ed653c 4245 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
b3f63c3d
MF
4246 netdev->hw_enc_features |= NETIF_F_TSO;
4247 netdev->hw_enc_features |= NETIF_F_TSO6;
27299841
GP
4248 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4249 }
4250
4251 if (mlx5e_vxlan_allowed(mdev)) {
4252 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4253 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4254 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4255 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b49663c8 4256 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
b3f63c3d
MF
4257 }
4258
27299841
GP
4259 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4260 netdev->hw_features |= NETIF_F_GSO_GRE |
4261 NETIF_F_GSO_GRE_CSUM;
4262 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4263 NETIF_F_GSO_GRE_CSUM;
4264 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4265 NETIF_F_GSO_GRE_CSUM;
4266 }
4267
94cb1ebb
EBE
4268 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4269
4270 if (fcs_supported)
4271 netdev->hw_features |= NETIF_F_RXALL;
4272
102722fc
GE
4273 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4274 netdev->hw_features |= NETIF_F_RXFCS;
4275
f62b8bb8 4276 netdev->features = netdev->hw_features;
6a9764ef 4277 if (!priv->channels.params.lro_en)
f62b8bb8
AV
4278 netdev->features &= ~NETIF_F_LRO;
4279
94cb1ebb
EBE
4280 if (fcs_enabled)
4281 netdev->features &= ~NETIF_F_RXALL;
4282
102722fc
GE
4283 if (!priv->channels.params.scatter_fcs_en)
4284 netdev->features &= ~NETIF_F_RXFCS;
4285
e8f887ac
AV
4286#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4287 if (FT_CAP(flow_modify_en) &&
4288 FT_CAP(modify_root) &&
4289 FT_CAP(identified_miss_table_mode) &&
1cabe6b0
MG
4290 FT_CAP(flow_table_modify)) {
4291 netdev->hw_features |= NETIF_F_HW_TC;
4292#ifdef CONFIG_RFS_ACCEL
4293 netdev->hw_features |= NETIF_F_NTUPLE;
4294#endif
4295 }
e8f887ac 4296
f62b8bb8 4297 netdev->features |= NETIF_F_HIGHDMA;
7d92d580 4298 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
f62b8bb8
AV
4299
4300 netdev->priv_flags |= IFF_UNICAST_FLT;
4301
4302 mlx5e_set_netdev_dev_addr(netdev);
cb67b832 4303
73561b4b 4304#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4811bc57 4305 if (MLX5_ESWITCH_MANAGER(mdev))
cb67b832
HHZ
4306 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4307#endif
547eede0
IT
4308
4309 mlx5e_ipsec_build_netdev(priv);
f62b8bb8
AV
4310}
4311
593cf338
RS
4312static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
4313{
4314 struct mlx5_core_dev *mdev = priv->mdev;
4315 int err;
4316
4317 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4318 if (err) {
4319 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4320 priv->q_counter = 0;
4321 }
4322}
4323
4324static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
4325{
4326 if (!priv->q_counter)
4327 return;
4328
4329 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4330}
4331
6bfd390b
HHZ
4332static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4333 struct net_device *netdev,
127ea380
HHZ
4334 const struct mlx5e_profile *profile,
4335 void *ppriv)
6bfd390b
HHZ
4336{
4337 struct mlx5e_priv *priv = netdev_priv(netdev);
547eede0 4338 int err;
6bfd390b 4339
127ea380 4340 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
547eede0
IT
4341 err = mlx5e_ipsec_init(priv);
4342 if (err)
4343 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
6bfd390b
HHZ
4344 mlx5e_build_nic_netdev(netdev);
4345 mlx5e_vxlan_init(priv);
4346}
4347
4348static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4349{
547eede0 4350 mlx5e_ipsec_cleanup(priv);
6bfd390b 4351 mlx5e_vxlan_cleanup(priv);
127ea380 4352
6a9764ef
SM
4353 if (priv->channels.params.xdp_prog)
4354 bpf_prog_put(priv->channels.params.xdp_prog);
6bfd390b
HHZ
4355}
4356
4357static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4358{
4359 struct mlx5_core_dev *mdev = priv->mdev;
4360 int err;
6bfd390b 4361
8f493ffd
SM
4362 err = mlx5e_create_indirect_rqt(priv);
4363 if (err)
6bfd390b 4364 return err;
6bfd390b
HHZ
4365
4366 err = mlx5e_create_direct_rqts(priv);
8f493ffd 4367 if (err)
6bfd390b 4368 goto err_destroy_indirect_rqts;
6bfd390b
HHZ
4369
4370 err = mlx5e_create_indirect_tirs(priv);
8f493ffd 4371 if (err)
6bfd390b 4372 goto err_destroy_direct_rqts;
6bfd390b
HHZ
4373
4374 err = mlx5e_create_direct_tirs(priv);
8f493ffd 4375 if (err)
6bfd390b 4376 goto err_destroy_indirect_tirs;
6bfd390b
HHZ
4377
4378 err = mlx5e_create_flow_steering(priv);
4379 if (err) {
4380 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4381 goto err_destroy_direct_tirs;
4382 }
4383
4384 err = mlx5e_tc_init(priv);
4385 if (err)
4386 goto err_destroy_flow_steering;
4387
4388 return 0;
4389
4390err_destroy_flow_steering:
4391 mlx5e_destroy_flow_steering(priv);
4392err_destroy_direct_tirs:
4393 mlx5e_destroy_direct_tirs(priv);
4394err_destroy_indirect_tirs:
4395 mlx5e_destroy_indirect_tirs(priv);
4396err_destroy_direct_rqts:
8f493ffd 4397 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4398err_destroy_indirect_rqts:
4399 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4400 return err;
4401}
4402
4403static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4404{
6bfd390b
HHZ
4405 mlx5e_tc_cleanup(priv);
4406 mlx5e_destroy_flow_steering(priv);
4407 mlx5e_destroy_direct_tirs(priv);
4408 mlx5e_destroy_indirect_tirs(priv);
8f493ffd 4409 mlx5e_destroy_direct_rqts(priv);
6bfd390b
HHZ
4410 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4411}
4412
4413static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4414{
4415 int err;
4416
4417 err = mlx5e_create_tises(priv);
4418 if (err) {
4419 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4420 return err;
4421 }
4422
4423#ifdef CONFIG_MLX5_CORE_EN_DCB
e207b7e9 4424 mlx5e_dcbnl_initialize(priv);
6bfd390b
HHZ
4425#endif
4426 return 0;
4427}
4428
4429static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4430{
4431 struct net_device *netdev = priv->netdev;
4432 struct mlx5_core_dev *mdev = priv->mdev;
2c3b5bee
SM
4433 u16 max_mtu;
4434
4435 mlx5e_init_l2_addr(priv);
4436
63bfd399
EBE
4437 /* Mark the link as currently not needed by the driver */
4438 if (!netif_running(netdev))
4439 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4440
2c3b5bee
SM
4441 /* MTU range: 68 - hw-specific max */
4442 netdev->min_mtu = ETH_MIN_MTU;
4443 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
c139dbfd 4444 netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
2c3b5bee 4445 mlx5e_set_dev_port_mtu(priv);
6bfd390b 4446
7907f23a
AH
4447 mlx5_lag_add(mdev, netdev);
4448
6bfd390b 4449 mlx5e_enable_async_events(priv);
127ea380 4450
4811bc57 4451 if (MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39 4452 mlx5e_register_vport_reps(priv);
2c3b5bee 4453
610e89e0
SM
4454 if (netdev->reg_state != NETREG_REGISTERED)
4455 return;
2a5e7a13
HN
4456#ifdef CONFIG_MLX5_CORE_EN_DCB
4457 mlx5e_dcbnl_init_app(priv);
4458#endif
610e89e0
SM
4459
4460 queue_work(priv->wq, &priv->set_rx_mode_work);
2c3b5bee
SM
4461
4462 rtnl_lock();
4463 if (netif_running(netdev))
4464 mlx5e_open(netdev);
4465 netif_device_attach(netdev);
4466 rtnl_unlock();
6bfd390b
HHZ
4467}
4468
4469static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4470{
3deef8ce 4471 struct mlx5_core_dev *mdev = priv->mdev;
3deef8ce 4472
2a5e7a13
HN
4473#ifdef CONFIG_MLX5_CORE_EN_DCB
4474 if (priv->netdev->reg_state == NETREG_REGISTERED)
4475 mlx5e_dcbnl_delete_app(priv);
4476#endif
4477
2c3b5bee
SM
4478 rtnl_lock();
4479 if (netif_running(priv->netdev))
4480 mlx5e_close(priv->netdev);
4481 netif_device_detach(priv->netdev);
4482 rtnl_unlock();
4483
6bfd390b 4484 queue_work(priv->wq, &priv->set_rx_mode_work);
1d447a39 4485
4811bc57 4486 if (MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39
SM
4487 mlx5e_unregister_vport_reps(priv);
4488
6bfd390b 4489 mlx5e_disable_async_events(priv);
3deef8ce 4490 mlx5_lag_remove(mdev);
6bfd390b
HHZ
4491}
4492
4493static const struct mlx5e_profile mlx5e_nic_profile = {
4494 .init = mlx5e_nic_init,
4495 .cleanup = mlx5e_nic_cleanup,
4496 .init_rx = mlx5e_init_nic_rx,
4497 .cleanup_rx = mlx5e_cleanup_nic_rx,
4498 .init_tx = mlx5e_init_nic_tx,
4499 .cleanup_tx = mlx5e_cleanup_nic_tx,
4500 .enable = mlx5e_nic_enable,
4501 .disable = mlx5e_nic_disable,
3834a5e6 4502 .update_stats = mlx5e_update_ndo_stats,
6bfd390b 4503 .max_nch = mlx5e_get_max_num_channels,
7ca42c80 4504 .update_carrier = mlx5e_update_carrier,
20fd0c19
SM
4505 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
4506 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
6bfd390b
HHZ
4507 .max_tc = MLX5E_MAX_NUM_TC,
4508};
4509
2c3b5bee
SM
4510/* mlx5e generic netdev management API (move to en_common.c) */
4511
26e59d80
MHY
4512struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4513 const struct mlx5e_profile *profile,
4514 void *ppriv)
f62b8bb8 4515{
26e59d80 4516 int nch = profile->max_nch(mdev);
f62b8bb8
AV
4517 struct net_device *netdev;
4518 struct mlx5e_priv *priv;
f62b8bb8 4519
08fb1dac 4520 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 4521 nch * profile->max_tc,
08fb1dac 4522 nch);
f62b8bb8
AV
4523 if (!netdev) {
4524 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4525 return NULL;
4526 }
4527
be4891af
SM
4528#ifdef CONFIG_RFS_ACCEL
4529 netdev->rx_cpu_rmap = mdev->rmap;
4530#endif
4531
127ea380 4532 profile->init(mdev, netdev, profile, ppriv);
f62b8bb8
AV
4533
4534 netif_carrier_off(netdev);
4535
4536 priv = netdev_priv(netdev);
4537
7bb29755
MF
4538 priv->wq = create_singlethread_workqueue("mlx5e");
4539 if (!priv->wq)
26e59d80
MHY
4540 goto err_cleanup_nic;
4541
4542 return netdev;
4543
4544err_cleanup_nic:
31ac9338
OG
4545 if (profile->cleanup)
4546 profile->cleanup(priv);
26e59d80
MHY
4547 free_netdev(netdev);
4548
4549 return NULL;
4550}
4551
2c3b5bee 4552int mlx5e_attach_netdev(struct mlx5e_priv *priv)
26e59d80 4553{
2c3b5bee 4554 struct mlx5_core_dev *mdev = priv->mdev;
26e59d80 4555 const struct mlx5e_profile *profile;
4d7d3ed9 4556 int max_nch;
26e59d80
MHY
4557 int err;
4558
26e59d80
MHY
4559 profile = priv->profile;
4560 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 4561
4d7d3ed9
YA
4562 /* max number of channels may have changed */
4563 max_nch = mlx5e_get_max_num_channels(priv->mdev);
4564 if (priv->channels.params.num_channels > max_nch) {
4565 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
4566 priv->channels.params.num_channels = max_nch;
4567 mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
4568 MLX5E_INDIR_RQT_SIZE, max_nch);
4569 }
4570
6bfd390b
HHZ
4571 err = profile->init_tx(priv);
4572 if (err)
ec8b9981 4573 goto out;
5c50368f 4574
a43b25da 4575 err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
5c50368f
AS
4576 if (err) {
4577 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
6bfd390b 4578 goto err_cleanup_tx;
5c50368f
AS
4579 }
4580
6bfd390b
HHZ
4581 err = profile->init_rx(priv);
4582 if (err)
5c50368f 4583 goto err_close_drop_rq;
5c50368f 4584
593cf338
RS
4585 mlx5e_create_q_counter(priv);
4586
6bfd390b
HHZ
4587 if (profile->enable)
4588 profile->enable(priv);
f62b8bb8 4589
26e59d80 4590 return 0;
5c50368f
AS
4591
4592err_close_drop_rq:
a43b25da 4593 mlx5e_close_drop_rq(&priv->drop_rq);
5c50368f 4594
6bfd390b
HHZ
4595err_cleanup_tx:
4596 profile->cleanup_tx(priv);
5c50368f 4597
26e59d80
MHY
4598out:
4599 return err;
f62b8bb8
AV
4600}
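/* Bring-up order mirrors the teardown in mlx5e_detach_netdev(): TX objects
 * first, then the drop RQ (a sink that discards traffic while no real
 * channels are open), then RX steering, and profile->enable() last.
 */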
4601
2c3b5bee 4602void mlx5e_detach_netdev(struct mlx5e_priv *priv)
26e59d80 4603{
26e59d80
MHY
4604 const struct mlx5e_profile *profile = priv->profile;
4605
4606 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80 4607
37f304d1
SM
4608 if (profile->disable)
4609 profile->disable(priv);
4610 flush_workqueue(priv->wq);
4611
26e59d80
MHY
4612 mlx5e_destroy_q_counter(priv);
4613 profile->cleanup_rx(priv);
a43b25da 4614 mlx5e_close_drop_rq(&priv->drop_rq);
26e59d80 4615 profile->cleanup_tx(priv);
26e59d80
MHY
4616 cancel_delayed_work_sync(&priv->update_stats_work);
4617}
4618
2c3b5bee
SM
4619void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4620{
4621 const struct mlx5e_profile *profile = priv->profile;
4622 struct net_device *netdev = priv->netdev;
4623
4624 destroy_workqueue(priv->wq);
4625 if (profile->cleanup)
4626 profile->cleanup(priv);
4627 free_netdev(netdev);
4628}
4629
26e59d80
MHY
4630/* mlx5e_attach and mlx5e_detach are limited in scope to creating/destroying
4631 * hardware contexts and connecting them to the current netdev.
4632 */
4633static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4634{
4635 struct mlx5e_priv *priv = vpriv;
4636 struct net_device *netdev = priv->netdev;
4637 int err;
4638
4639 if (netif_device_present(netdev))
4640 return 0;
4641
4642 err = mlx5e_create_mdev_resources(mdev);
4643 if (err)
4644 return err;
4645
2c3b5bee 4646 err = mlx5e_attach_netdev(priv);
26e59d80
MHY
4647 if (err) {
4648 mlx5e_destroy_mdev_resources(mdev);
4649 return err;
4650 }
4651
4652 return 0;
4653}
4654
4655static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4656{
4657 struct mlx5e_priv *priv = vpriv;
4658 struct net_device *netdev = priv->netdev;
4659
4660 if (!netif_device_present(netdev))
4661 return;
4662
2c3b5bee 4663 mlx5e_detach_netdev(priv);
26e59d80
MHY
4664 mlx5e_destroy_mdev_resources(mdev);
4665}
4666
b50d292b
HHZ
4667static void *mlx5e_add(struct mlx5_core_dev *mdev)
4668{
07c9f1e5
SM
4669 struct net_device *netdev;
4670 void *rpriv = NULL;
26e59d80 4671 void *priv;
26e59d80 4672 int err;
b50d292b 4673
26e59d80
MHY
4674 err = mlx5e_check_required_hca_cap(mdev);
4675 if (err)
b50d292b
HHZ
4676 return NULL;
4677
e80541ec 4678#ifdef CONFIG_MLX5_ESWITCH
4811bc57 4679 if (MLX5_ESWITCH_MANAGER(mdev)) {
07c9f1e5 4680 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
1d447a39 4681 if (!rpriv) {
07c9f1e5 4682 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
1d447a39
SM
4683 return NULL;
4684 }
1d447a39 4685 }
e80541ec 4686#endif
127ea380 4687
1d447a39 4688 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
26e59d80
MHY
4689 if (!netdev) {
4690 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
07c9f1e5 4691 goto err_free_rpriv;
26e59d80
MHY
4692 }
4693
4694 priv = netdev_priv(netdev);
4695
4696 err = mlx5e_attach(mdev, priv);
4697 if (err) {
4698 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4699 goto err_destroy_netdev;
4700 }
4701
4702 err = register_netdev(netdev);
4703 if (err) {
4704 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4705 goto err_detach;
b50d292b 4706 }
26e59d80 4707
2a5e7a13
HN
4708#ifdef CONFIG_MLX5_CORE_EN_DCB
4709 mlx5e_dcbnl_init_app(priv);
4710#endif
26e59d80
MHY
4711 return priv;
4712
4713err_detach:
4714 mlx5e_detach(mdev, priv);
26e59d80 4715err_destroy_netdev:
2c3b5bee 4716 mlx5e_destroy_netdev(priv);
07c9f1e5 4717err_free_rpriv:
1d447a39 4718 kfree(rpriv);
26e59d80 4719 return NULL;
b50d292b
HHZ
4720}
4721
b50d292b
HHZ
4722static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4723{
4724 struct mlx5e_priv *priv = vpriv;
1d447a39 4725 void *ppriv = priv->ppriv;
127ea380 4726
2a5e7a13
HN
4727#ifdef CONFIG_MLX5_CORE_EN_DCB
4728 mlx5e_dcbnl_delete_app(priv);
4729#endif
5e1e93c7 4730 unregister_netdev(priv->netdev);
26e59d80 4731 mlx5e_detach(mdev, vpriv);
2c3b5bee 4732 mlx5e_destroy_netdev(priv);
1d447a39 4733 kfree(ppriv);
b50d292b
HHZ
4734}
4735
f62b8bb8
AV
4736static void *mlx5e_get_netdev(void *vpriv)
4737{
4738 struct mlx5e_priv *priv = vpriv;
4739
4740 return priv->netdev;
4741}
4742
4743static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
4744 .add = mlx5e_add,
4745 .remove = mlx5e_remove,
26e59d80
MHY
4746 .attach = mlx5e_attach,
4747 .detach = mlx5e_detach,
f62b8bb8
AV
4748 .event = mlx5e_async_event,
4749 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4750 .get_dev = mlx5e_get_netdev,
4751};
4752
4753void mlx5e_init(void)
4754{
2ac9cfe7 4755 mlx5e_ipsec_build_inverse_table();
665bc539 4756 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
4757 mlx5_register_interface(&mlx5e_interface);
4758}
4759
4760void mlx5e_cleanup(void)
4761{
4762 mlx5_unregister_interface(&mlx5e_interface);
4763}