/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "vxlan.h"

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};

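/* Striding RQ (MPWQE) can be used only when the device reports the
 * striding_rq and umr_ptr_rlky general caps and supports UMR WQEs on a
 * regular send queue; otherwise the driver falls back to the plain
 * linked-list RQ type.
 */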
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}

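/* Fill in the RQ parameters for the chosen WQ type. MPWQE RQs derive
 * their stride size/count from device caps (a smaller stride when CQE
 * compression is enabled); linked-list RQs get XDP-aware headroom and an
 * LRO WQE size shrunk to leave room for build_skb()'s shared info.
 */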
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params, u8 rq_type)
{
	params->rq_wq_type = rq_type;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		params->mpwqe_log_stride_sz =
			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			params->mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
		params->rq_headroom = params->xdp_prog ?
			XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
		params->rq_headroom += NET_IP_ALIGN;

		/* Extra room needed for build_skb */
		params->lro_wqe_sz -= params->rq_headroom +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(params->log_rq_size),
		       BIT(params->mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		    !params->xdp_prog ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;
	mlx5e_set_rq_type_params(mdev, params, rq_type);
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

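/* Fold the per-ring counters into a single mlx5e_sw_stats snapshot.
 * Counters the rings do not track directly (tx_csum_partial,
 * rx_csum_unnecessary) are derived here from the packet totals.
 */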
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats temp, *s = &temp;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse  += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none		+= sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
	memcpy(&priv->stats.sw, s, sizeof(*s));
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!full)
		return;

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
	int err;

	if (!priv->q_counter)
		return;

	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
	if (err)
		return;

	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
	if (full)
		mlx5e_update_pcie_counters(priv);
	mlx5e_update_pport_counters(priv, full);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_q_counter(priv);
	mlx5e_update_sw_counters(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_stats(priv, false);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = NULL;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	case MLX5_DEV_EVENT_PPS:
		eqe = (struct mlx5_eqe *)param;
		ptp_event.type = PTP_CLOCK_EXTTS;
		ptp_event.index = eqe->data.pps.pin;
		ptp_event.timestamp =
			timecounter_cyc2time(&priv->tstamp.clock,
					     be64_to_cpu(eqe->data.pps.time_stamp));
		mlx5e_pps_event_handler(vpriv, &ptp_event);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

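/* Pre-build the UMR WQE that the ICO SQ posts to map a multi-packet WQE:
 * the control segment carries the RQ's mkey, and the data segment points
 * at the DMA-mapped MTT array held in this WQE's mpw_info slot.
 */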
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe,
				       u16 ix)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

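/* Allocate the SW side of an RQ. A striding RQ additionally needs a UMR
 * mkey and per-WQE MPWQE info; a linked-list RQ needs the per-fragment
 * info array and a page order large enough for headroom plus one WQE.
 */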
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int npages;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->rx_headroom = params->rq_headroom;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);

		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->buff.wqe_sz;

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->wqe.frag_info =
			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
				     GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frag_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			kfree(rq->wqe.frag_info);
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->buff.wqe_sz = params->lro_en ?
				params->lro_wqe_sz :
				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
		byte_count = rq->buff.wqe_sz;

		/* calc the required page order */
		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = params->rx_cq_period_mode;
	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->wqe.frag_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

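/* An RQ is created in RST state and starts receiving only after the
 * RST->RDY transition; RDY->RDY modifies are used at runtime to toggle
 * per-RQ attributes such as scatter-FCS and VLAN stripping (vsd).
 */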
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;

	struct mlx5_wq_ll *wq = &rq->wq;
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= min_wqes)
			return 0;

		msleep(20);
	}

	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->rqn, wq->cur_sz, min_wqes);
	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
		 * but yet to be re-posted.
		 */
		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
			rq->dealloc_wqe(rq, wqe_ix);
	}
}

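/* Open an RQ: allocate the SW state, create the HW object in RST, then
 * move it to RDY. Activation (the first doorbell, via a NOP on the
 * channel's ICO SQ) happens separately in mlx5e_activate_rq().
 */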
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	sq->db.ico_wqe[pi].num_wqebbs = 1;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->am.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->max_inline      = params->tx_max_inline;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl	*wq_ctrl;
	u32			 cqn;
	u32			 tisn;
	u8			 tis_lst_sz;
	u8			 min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

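/* CQ setup shared by RQ, TXQ, ICO and XDP SQs. Every CQE's op_own byte
 * is pre-set to 0xf1 (invalid opcode, HW ownership) so the poller never
 * consumes a stale entry before hardware writes a real completion.
 */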
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

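/* TX rate limiting: each distinct rate occupies an entry in the device's
 * shared rate table; the SQ is attached to that entry through a RDY->RDY
 * modify with rl_update set. Rate 0 detaches the SQ from the table.
 */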
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

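/* Bring up one channel: open all CQs first (ICO SQ CQ, per-tc TX CQs,
 * RX CQ, and the XDP SQ CQ when an XDP program is attached), enable
 * NAPI, and only then open the queues themselves, ending with the RQ.
 * The error path unwinds in exact reverse order.
 */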
f62b8bb8 1724static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
6a9764ef 1725 struct mlx5e_params *params,
f62b8bb8
AV
1726 struct mlx5e_channel_param *cparam,
1727 struct mlx5e_channel **cp)
1728{
6a9764ef 1729 struct mlx5e_cq_moder icocq_moder = {0, 0};
f62b8bb8
AV
1730 struct net_device *netdev = priv->netdev;
1731 int cpu = mlx5e_get_cpu(priv, ix);
1732 struct mlx5e_channel *c;
1733 int err;
1734
1735 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1736 if (!c)
1737 return -ENOMEM;
1738
1739 c->priv = priv;
a43b25da
SM
1740 c->mdev = priv->mdev;
1741 c->tstamp = &priv->tstamp;
f62b8bb8
AV
1742 c->ix = ix;
1743 c->cpu = cpu;
1744 c->pdev = &priv->mdev->pdev->dev;
1745 c->netdev = priv->netdev;
b50d292b 1746 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
6a9764ef
SM
1747 c->num_tc = params->num_tc;
1748 c->xdp = !!params->xdp_prog;
cb3c7fd4 1749
f62b8bb8
AV
1750 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1751
6a9764ef 1752 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
f62b8bb8
AV
1753 if (err)
1754 goto err_napi_del;
1755
6a9764ef 1756 err = mlx5e_open_tx_cqs(c, params, cparam);
d3c9bc27
TT
1757 if (err)
1758 goto err_close_icosq_cq;
1759
6a9764ef 1760 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
f62b8bb8
AV
1761 if (err)
1762 goto err_close_tx_cqs;
f62b8bb8 1763
d7a0ecab 1764 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
6a9764ef
SM
1765 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1766 &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
d7a0ecab
SM
1767 if (err)
1768 goto err_close_rx_cq;
1769
f62b8bb8
AV
1770 napi_enable(&c->napi);
1771
6a9764ef 1772 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
f62b8bb8
AV
1773 if (err)
1774 goto err_disable_napi;
1775
6a9764ef 1776 err = mlx5e_open_sqs(c, params, cparam);
d3c9bc27
TT
1777 if (err)
1778 goto err_close_icosq;
1779
6a9764ef 1780 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
d7a0ecab
SM
1781 if (err)
1782 goto err_close_sqs;
b5503b99 1783
6a9764ef 1784 err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
f62b8bb8 1785 if (err)
b5503b99 1786 goto err_close_xdp_sq;
f62b8bb8 1787
f62b8bb8
AV
1788 *cp = c;
1789
1790 return 0;
b5503b99 1791err_close_xdp_sq:
d7a0ecab 1792 if (c->xdp)
31391048 1793 mlx5e_close_xdpsq(&c->rq.xdpsq);
f62b8bb8
AV
1794
1795err_close_sqs:
1796 mlx5e_close_sqs(c);
1797
d3c9bc27 1798err_close_icosq:
31391048 1799 mlx5e_close_icosq(&c->icosq);
d3c9bc27 1800
f62b8bb8
AV
1801err_disable_napi:
1802 napi_disable(&c->napi);
d7a0ecab 1803 if (c->xdp)
31871f87 1804 mlx5e_close_cq(&c->rq.xdpsq.cq);
d7a0ecab
SM
1805
1806err_close_rx_cq:
f62b8bb8
AV
1807 mlx5e_close_cq(&c->rq.cq);
1808
1809err_close_tx_cqs:
1810 mlx5e_close_tx_cqs(c);
1811
d3c9bc27
TT
1812err_close_icosq_cq:
1813 mlx5e_close_cq(&c->icosq.cq);
1814
f62b8bb8
AV
1815err_napi_del:
1816 netif_napi_del(&c->napi);
1817 kfree(c);
1818
1819 return err;
1820}
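
/* mlx5e_open_channel() builds one channel in strict order: all CQs first
 * (ICOSQ CQ, per-TC TX CQs, RX CQ, and the XDP SQ CQ when XDP is enabled),
 * then NAPI is enabled, then the queues that post to those CQs (ICOSQ,
 * TXQ SQs, XDP SQ, and finally the RQ). The error labels unwind in exactly
 * the reverse order, so a failure at any step leaves no dangling objects.
 */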

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
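
/* In the striding-RQ case above, the WQ fields appear to be programmed as
 * offsets from hardware base values: log_wqe_num_of_strides relative to a
 * base of 2^9 strides and log_wqe_stride_size relative to a base of 2^6
 * bytes, which would explain the 9 and 6 subtracted from the driver's log
 * values.
 */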

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = params->log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
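
/* The RX CQ for a striding RQ is sized to log_rq_size + mpwqe_log_num_strides
 * because, in the worst case, every stride of every multi-packet WQE can
 * complete as a separate CQE; a plain linked-list RQ needs only one CQE per
 * WQE, hence log_rq_size alone.
 */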

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}
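
/* All channel parameters are expanded from chs->params into a scratch
 * mlx5e_channel_param (heap-allocated, as it is fairly large), used to open
 * every channel, and freed again on both the success and failure paths; on
 * failure the channels opened so far are closed in reverse order.
 */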

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
		if (err)
			break;
	}

	return err;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}

static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
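
/* A freshly created RQT points every entry at the drop RQ; traffic is only
 * steered to real RQs later, when mlx5e_redirect_rqt() rewrites the table
 * (see mlx5e_redirect_rqts_to_channels() below).
 */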

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
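
/* mlx5e_bits_invert() mirrors the low 'size' bits of 'a': for size = 4,
 * 0b0011 becomes 0b1100. For ETH_RSS_HASH_XOR the indirection table is
 * filled in this bit-reversed order, matching how the device's
 * MLX5_RX_HASH_FN_INVERTED_XOR8 function indexes the table.
 */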

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
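
/* Two redirect flavours are built from the same helper: every RQT (the RSS
 * table plus all per-channel direct tables) is pointed either at the RQs of
 * a live set of channels, or at the drop RQ while channels are being torn
 * down, so no table ever references a destroyed RQ.
 */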

static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
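
/* The >> 8 above suggests lro_max_ip_payload_size is programmed in units of
 * 256 bytes: the LRO WQE size minus a rough upper bound on the L2+L3
 * headers leaves the payload budget for an aggregated session.
 */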

void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP |\
				 MLX5_HASH_FIELD_SEL_DST_IP |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different from netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
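
/* The SW2HW/HW2SW macros translate between the stack's view of the MTU and
 * the value the device expects, presumably accounting for the extra L2
 * overhead (Ethernet header and FCS) that the hardware counts but the
 * netdev MTU does not.
 */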

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0.
	 * We have our own UP to TXQ mapping for QoS.
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}

static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
		MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
}
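
/* TXQ layout: queue index = channel + tc * num_channels, i.e. all TC0
 * queues come first, then all TC1 queues, and so on. With 4 channels and
 * 2 TCs, channel 1 / TC 1 maps to TXQ 1 + 1 * 4 = 5. txq2sq[] is the
 * reverse map the TX path uses to find the SQ for a given TXQ.
 */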

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a workaround for the TX timeout watchdog false
	 * alarm that is raised when polling for inactive TX queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}

void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}
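
/* This is the make-before-break helper used by the runtime reconfiguration
 * paths below (MTU, LRO, number of TCs): callers first open a complete new
 * set of channels, then call here to swap it in. The optional hw_modify
 * callback runs after the swap, for settings (e.g. mlx5e_modify_tirs_lro)
 * that must be applied against the new channels.
 */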

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involved close and open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	return mlx5e_alloc_cq_common(mdev, param, cq);
}

static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}
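
/* RX steering hierarchy recap: each RQ is reachable either through the RSS
 * path (indirect TIR -> indirection RQT -> per-channel RQs, hashed per
 * traffic type) or through a direct path (direct TIR -> single-entry RQT ->
 * one RQ), which is what per-queue flow steering such as aRFS uses.
 */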

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      u32 chain_index, __be16 proto,
			      struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	if (chain_index)
		return -EOPNOTSUPP;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}

static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	if (!reset) {
		priv->channels.params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				   set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
				    set_feature_rx_fcs);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
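
/* mlx5e_handle_feature() acts only when the requested bit actually differs
 * from the current state (changes = wanted ^ current), calls the handler,
 * and commits the bit into netdev->features only on success, so a failed
 * toggle leaves the advertised features accurate.
 */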

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int curr_mtu;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->channels.params.lro_en &&
		(priv->channels.params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	curr_mtu    = netdev->mtu;
	netdev->mtu = new_mtu;

	if (!reset) {
		mlx5e_set_dev_port_mtu(priv);
		goto out;
	}

	new_channels.params = priv->channels.params;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err) {
		netdev->mtu = curr_mtu;
		goto out;
	}

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}
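
/* In all the VF ndo handlers above, VF n is addressed as e-switch vport
 * n + 1: vport 0 is the PF's own vport, so the stack's VF numbering is
 * shifted by one when talking to the eswitch API.
 */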

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}

static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
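
/* Per-packet feature trimming: for an encapsulated skb, checksum and GSO
 * offloads are kept only if the outer UDP destination port was previously
 * registered with the device via the VXLAN port notifiers above; otherwise
 * the stack falls back to software checksum/segmentation for that packet.
 */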

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* Exchange programs; the extra program reference we got from the
	 * caller is kept as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_params(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
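
/* mlx5e_xdp_set() has two paths: installing or removing a program changes
 * the RQ type, so the channels are fully closed and reopened (reset is
 * true); swapping one program for another keeps the channels up and only
 * exchanges rq->xdp_prog per channel under a brief RQ disable, with
 * napi_synchronize() ensuring no in-flight RX polling sees the old program.
 */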

static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
 * re-enabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_channels *chs = &priv->channels;
	int i;

	for (i = 0; i < chs->num; i++)
		napi_schedule(&chs->c[i]->napi);
}
#endif

static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
};
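
/* The SR-IOV ops table is a superset of the basic one: it additionally
 * wires up VF configuration (.ndo_set_vf_*), the UDP tunnel port
 * notifications and .ndo_features_check for per-skb tunnel offload
 * validation. Which table is used is decided in mlx5e_build_nic_netdev()
 * below, based on the vport_group_manager capability.
 */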

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
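
/* Illustrative arithmetic: if log_bf_reg_size were 9, the BlueFlame
 * register would be 512 bytes and bf_buf_size 256, so the cap would be
 * 256 - sizeof(struct mlx5e_tx_wqe) + 2 bytes of inline header space.
 * The halving reflects the register being split into two alternating
 * send buffers; the result bounds how much packet data may be inlined
 * into a TX WQE.
 */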

void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
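
/* Worked example: with len == MLX5E_INDIR_RQT_SIZE and four channels on
 * the device's NUMA node, the indirection table becomes 0,1,2,3,0,1,2,3,...
 * so RSS spreads flows round-robin across the node-local channels only.
 */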

static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
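
/* Illustrative numbers: a PCIe Gen3 x8 link reports PCIE_SPEED_8_0GT and
 * a width of 8, giving *pci_bw = 8000 * 8 = 64000 Mb/s (raw rate, not
 * accounting for encoding overhead); any speed not listed in the switch
 * falls through to the default case and fails with -EINVAL.
 */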

static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}

static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
{
	return !(link_speed && pci_bw &&
		 (pci_bw <= 16000) && (pci_bw < link_speed));
}
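
/* Reading the two heuristics together: CQE compression defaults to on
 * only when the PCI link is both under 40 Gb/s and slower than the port,
 * i.e. when PCI bandwidth is the bottleneck; HW LRO is defaulted off in
 * the narrower case of a <= 16 Gb/s bottlenecked PCI link, where the
 * larger aggregated transfers would presumably hurt more than they help.
 */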

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	if (params->rx_am_enabled)
		params->rx_cq_moderation =
			mlx5e_am_get_def_profile(params->rx_cq_period_mode);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
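
/* mlx5e_choose_lro_timeout() returns the smallest supported period that
 * is >= wanted_timeout; because the loop stops at the last array slot,
 * an out-of-range request silently degrades to the largest period the
 * device supports.
 */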

void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels)
{
	u8 cq_period_mode = 0;
	u32 link_speed = 0;
	u32 pci_bw = 0;

	params->num_channels = max_channels;
	params->num_tc = 1;

	mlx5e_get_max_linkspeed(mdev, &link_speed);
	mlx5e_get_pci_bw(mdev, &pci_bw);
	mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
		      link_speed, pci_bw);

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	mlx5e_set_rq_params(mdev, params);

	/* HW LRO */
	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;

	/* TX inline */
	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}

static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->profile = profile;
	priv->ppriv = ppriv;
	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

	mlx5e_ipsec_build_netdev(priv);
}
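
/* Throughout mlx5e_build_nic_netdev(), hw_features collects everything
 * the device can do (toggleable later via ethtool), while features is
 * the subset actually enabled at probe time: e.g. LRO, RXALL and RXFCS
 * stay advertised as toggleable but are masked out of the initial state
 * when the defaults say so.
 */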

static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);

	if (priv->channels.params.xdp_prog)
		bpf_prog_put(priv->channels.params.xdp_prog);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}
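
/* The RX objects form a strict dependency chain: RQTs first, then the
 * TIRs that point at them, then flow steering and TC offload state on
 * top. The error unwind labels above release them in exact reverse
 * order, as does mlx5e_cleanup_nic_rx() below.
 */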

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_register_vport_reps(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;

	/* Device already registered: sync netdev system state */
	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_unregister_vport_reps(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_ndo_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
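
/* The mlx5e_profile indirection lets the NIC netdev and the eswitch
 * representor netdevs (see en_rep.c) share the generic create/attach/
 * detach/destroy flow below while supplying their own RX/TX setup and
 * CQE handlers.
 */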

/* mlx5e generic netdev management API (move to en_common.c) */

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}

int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	const struct mlx5e_profile *profile;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
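
/* Ordering in mlx5e_attach_netdev() matters: the drop RQ is opened before
 * profile->init_rx() so that the RQ tables created there can point at a
 * valid RQN while no channels exist yet; traffic hitting it is simply
 * dropped until mlx5e_open() activates real channels.
 */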

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

/* The scope of mlx5e_attach and mlx5e_detach is limited to creating and
 * destroying the hardware contexts and connecting them to the current
 * netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_rep_priv *rpriv = NULL;
	void *priv;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
		if (!rpriv) {
			mlx5_core_warn(mdev,
				       "Not creating net device, failed to alloc rep priv data\n");
			return NULL;
		}
		rpriv->rep = &esw->offloads.vport_reps[0];
	}

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	kfree(rpriv);
	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	void *ppriv = priv->ppriv;

	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv);
}
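
/* mlx5e_add()/mlx5e_remove() bracket the whole netdev lifetime, while
 * the lighter mlx5e_attach()/mlx5e_detach() pair is also invoked by the
 * mlx5 core (e.g. on reset and recovery flows) to drop and rebuild the
 * hardware contexts without destroying the netdev itself.
 */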

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}