drivers/net/ethernet/mellanox/mlx5/core/en_main.c (mirror_ubuntu-bionic-kernel.git)

/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_rq_param {
	u32 rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param wq;
	bool am_enabled;
};

struct mlx5e_sq_param {
	u32 sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param wq;
	u16 max_inline;
	u8 min_inline_mode;
	enum mlx5e_sq_type type;
};

struct mlx5e_cq_param {
	u32 cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param wq;
	u16 eq_ix;
	u8 cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_sq_param xdp_sq;
	struct mlx5e_sq_param icosq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
	struct mlx5e_cq_param icosq_cq;
};

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
	       MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
	       MLX5_CAP_ETH(mdev, reg_umr_sq);
}

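/* Two RX work-queue layouts are supported: the striding RQ
 * (MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, a.k.a. MPWQE), where a single
 * multi-packet WQE is carved into fixed-size strides, and the legacy
 * linked-list RQ with one packet per WQE. The helpers below choose the
 * layout and derive its size parameters.
 */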
static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
{
	priv->params.rq_wq_type = rq_type;
	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}
	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
						    BIT(priv->params.log_rq_size));

	mlx5_core_info(priv->mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
		     !priv->xdp_prog ?
		     MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		     MLX5_WQ_TYPE_LINKED_LIST;
	mlx5e_set_rq_type_params(priv, rq_type);
}

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

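/* Fold the per-ring RQ/SQ counters into the device-wide software stats.
 * The checksum counters that have no direct per-ring source
 * (tx_csum_partial, rx_csum_unnecessary) are derived at the end by
 * subtracting from the packet totals.
 */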
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none += sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

free_out:
	kvfree(in);
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

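/* Striding-RQ (MPWQE) bookkeeping: each RX WQE is backed by an array of
 * MTT entries that gets registered with the device via a UMR WQE posted
 * on the channel's ICO SQ. The helpers below size, build and DMA-map
 * that per-WQE MTT array.
 */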
static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
				       struct mlx5e_umr_wqe *wqe, u16 ix)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}

static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = rq->priv;
	u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));

	return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

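/* Allocate the software side of an RQ and wire up the datapath handlers
 * for the chosen WQ type. An attached XDP program forces bidirectional
 * DMA mapping and reserves XDP_PACKET_HEADROOM in front of each packet
 * buffer.
 */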
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	u32 frag_sz;
	int npages;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = priv->params.rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->priv    = c->priv;

	rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	if (rq->xdp_prog) {
		rq->buff.map_dir = DMA_BIDIRECTIONAL;
		rq->rx_headroom = XDP_PACKET_HEADROOM;
	} else {
		rq->buff.map_dir = DMA_FROM_DEVICE;
		rq->rx_headroom = MLX5_RX_HEADROOM;
	}

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		if (mlx5e_is_vf_vport_rep(priv)) {
			err = -EINVAL;
			goto err_rq_wq_destroy;
		}

		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);

		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->buff.wqe_sz;

		err = mlx5e_create_rq_umr_mkey(rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->dma_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}

		if (mlx5e_is_vf_vport_rep(priv))
			rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
		else
			rq->handle_rx_cqe = mlx5e_handle_rx_cqe;

		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->buff.wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		byte_count = rq->buff.wqe_sz;

		/* calc the required page order */
		frag_sz = rq->rx_headroom +
			  byte_count /* packet data */ +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		frag_sz = SKB_DATA_ALIGN(frag_sz);

		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->dma_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix = be16_to_cpu(wqe_ix_be);
		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}
}

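/* RQ bring-up: allocate SW resources, create the HW RQ in RST state,
 * then move it RST -> RDY. The NOP posted on the ICO SQ at the end
 * generates a completion that kicks NAPI, which starts refilling the
 * ring via mlx5e_post_rx_wqes().
 */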
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_sq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	sq->db.ico_wqe[pi].num_wqebbs = 1;
	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

	mlx5e_disable_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_destroy_rq(rq);
}

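/* An SQ comes in three flavors (enum mlx5e_sq_type): TXQ for netdev
 * traffic, ICO for internal control operations (UMR, NOP), and XDP for
 * XDP_TX. Each flavor keeps its own per-WQE context arrays in sq->db,
 * allocated below on the channel's NUMA node.
 */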
static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
	kfree(sq->db.xdp.di);
	kfree(sq->db.xdp.wqe_info);
}

static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
				     GFP_KERNEL, numa);
	sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
					   GFP_KERNEL, numa);
	if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
		mlx5e_free_sq_xdp_db(sq);
		return -ENOMEM;
	}

	return 0;
}

866
f10b7cc7 867static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
f62b8bb8 868{
f10b7cc7 869 kfree(sq->db.ico_wqe);
f62b8bb8
AV
870}
871
f10b7cc7
SM
872static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
873{
874 u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
875
876 sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
877 GFP_KERNEL, numa);
878 if (!sq->db.ico_wqe)
879 return -ENOMEM;
880
881 return 0;
882}
883
static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
{
	kfree(sq->db.txq.wqe_info);
	kfree(sq->db.txq.dma_fifo);
	kfree(sq->db.txq.skb);
}

static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
				      GFP_KERNEL, numa);
	sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
					   GFP_KERNEL, numa);
	sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
					   GFP_KERNEL, numa);
	if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
		mlx5e_free_sq_txq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:
		mlx5e_free_sq_txq_db(sq);
		break;
	case MLX5E_SQ_ICO:
		mlx5e_free_sq_ico_db(sq);
		break;
	case MLX5E_SQ_XDP:
		mlx5e_free_sq_xdp_db(sq);
		break;
	}
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:
		return mlx5e_alloc_sq_txq_db(sq, numa);
	case MLX5E_SQ_ICO:
		return mlx5e_alloc_sq_ico_db(sq, numa);
	case MLX5E_SQ_XDP:
		return mlx5e_alloc_sq_xdp_db(sq, numa);
	}

	return 0;
}

940
b5503b99
SM
941static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
942{
943 switch (sq_type) {
944 case MLX5E_SQ_ICO:
945 return MLX5E_ICOSQ_MAX_WQEBBS;
946 case MLX5E_SQ_XDP:
947 return MLX5E_XDP_TX_WQEBBS;
948 }
949 return MLX5_SEND_WQE_MAX_WQEBBS;
950}
951
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	sq->type = param->type;
	sq->pdev = c->pdev;
	sq->tstamp = &priv->tstamp;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc = tc;

	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	if (sq->bfreg.wc)
		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);

	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline = param->max_inline;
	sq->min_inline_mode =
		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
		param->min_inline_mode : 0;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	if (sq->type == MLX5E_SQ_TXQ) {
		int txq_ix;

		txq_ix = c->ix + tc * priv->params.num_channels;
		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
		priv->txq_to_sq_map[txq_ix] = sq;
	}

	sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_free_bfreg(mdev, &sq->bfreg);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_free_bfreg(priv->mdev, &sq->bfreg);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
				      0 : priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
			   int next_state, bool update_rl, int rl_index)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);
	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
		goto err_disable_sq;

	if (sq->txq) {
		netdev_tx_reset_queue(sq->txq);
		netif_tx_start_queue(sq->txq);
	}

	return 0;

err_disable_sq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&sq->channel->napi);

	if (sq->txq) {
		netif_tx_disable_queue(sq->txq);

		/* last doorbell out, godspeed .. */
		if (mlx5e_sq_has_room_for(sq, 1)) {
			sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
			mlx5e_send_nop(sq, true);
		}
	}

	mlx5e_disable_sq(sq);
	mlx5e_free_sq_descs(sq);
	mlx5e_destroy_sq(sq);
}

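/* CQ buffer initialization: op_own is set to 0xf1 (invalid opcode with
 * the ownership bit set) on every CQE so that software will not treat
 * an entry as valid before hardware has written it.
 */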
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi = &c->napi;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < priv->profile->max_tc; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}

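/* TX rate limiting uses a device-global table of rate entries shared by
 * all SQs: setting a per-queue max rate takes a reference on a table
 * entry via mlx5_rl_add_rate() and points the SQ at it with a MODIFY_SQ
 * command; a rate of 0 detaches the SQ from the table.
 */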
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_sq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space for the next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
			      MLX5_SQC_STATE_RDY, true, rl_index);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether the rate is in the valid range; 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

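/* A channel bundles everything one IRQ/NAPI context drives: an RQ, one
 * SQ per TC, the ICO SQ, an optional XDP SQ, and a CQ for each of them.
 * CQs are opened first so every SQ/RQ can be created pointing at its
 * completion queue; the error path unwinds in reverse order.
 */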
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	struct mlx5e_sq *sq;
	int err;
	int i;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv = priv;
	c->ix = ix;
	c->cpu = cpu;
	c->pdev = &priv->mdev->pdev->dev;
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc = priv->params.num_tc;
	c->xdp = !!priv->xdp_prog;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
				     priv->params.tx_cq_moderation) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	for (i = 0; i < priv->params.num_tc; i++) {
		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

		if (priv->tx_rates[txq_ix]) {
			sq = priv->txq_to_sq_map[txq_ix];
			mlx5e_set_sq_maxrate(priv->netdev, sq,
					     priv->tx_rates[txq_ix]);
		}
	}

	err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;
err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_sq(&c->xdp_sq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_sq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->xdp_sq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_sq(&c->xdp_sq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->xdp_sq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}

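/* Note that the WQ context stores the striding-RQ geometry in biased
 * form: log_wqe_num_of_strides and log_wqe_stride_size are offsets from
 * the hardware minimums (2^9 strides of 2^6 bytes), hence the -9 and -6
 * below.
 */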
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
	param->type = MLX5E_SQ_TXQ;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	param->type = MLX5E_SQ_ICO;
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	/* XDP SQs support only L2 inline mode */
	param->min_inline_mode = MLX5_INLINE_MODE_NONE;
	param->type = MLX5E_SQ_XDP;
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a W/A for the TX timeout watchdog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}

1827static void mlx5e_close_channels(struct mlx5e_priv *priv)
1828{
1829 int i;
1830
c3b7c5c9
MHY
1831 /* FIXME: This is a workaround only for TX timeout watchdog false alarms
1832 * when polling for inactive TX queues.
1833 */
1834 netif_tx_stop_all_queues(priv->netdev);
1835 netif_tx_disable(priv->netdev);
1836
f62b8bb8
AV
1837 for (i = 0; i < priv->params.num_channels; i++)
1838 mlx5e_close_channel(priv->channel[i]);
1839
03289b88 1840 kfree(priv->txq_to_sq_map);
f62b8bb8
AV
1841 kfree(priv->channel);
1842}
1843
2be6967c
SM
1844static int mlx5e_rx_hash_fn(int hfunc)
1845{
1846 return (hfunc == ETH_RSS_HASH_TOP) ?
1847 MLX5_RX_HASH_FN_TOEPLITZ :
1848 MLX5_RX_HASH_FN_INVERTED_XOR8;
1849}
1850
1851static int mlx5e_bits_invert(unsigned long a, int size)
1852{
1853 int inv = 0;
1854 int i;
1855
1856 for (i = 0; i < size; i++)
1857 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
1858
1859 return inv;
1860}
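/* Illustrative sketch, not driver code: a standalone userspace
 * re-implementation of the bit inversion above, assuming
 * MLX5E_LOG_INDIR_RQT_SIZE == 7 (a 128-slot indirection table). With the
 * XOR8 hash function, walking the table in bit-reversed order spreads
 * adjacent table slots across distant RQT entries.
 */
#include <stdio.h>

static int bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	/* Mirror the low 'size' bits of 'a': bit (size-1-i) moves to bit i */
	for (i = 0; i < size; i++)
		inv |= (!!(a & (1UL << (size - i - 1)))) << i;

	return inv;
}

int main(void)
{
	/* For a 7-bit (128-slot) table: 1 -> 64, 2 -> 32, 3 -> 96 */
	printf("%d %d %d\n", bits_invert(1, 7), bits_invert(2, 7),
	       bits_invert(3, 7));
	return 0;
}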
1861
936896e9
AS
1862static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
1863{
1864 int i;
1865
1866 for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
1867 int ix = i;
1da36696 1868 u32 rqn;
936896e9
AS
1869
1870 if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
1871 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
1872
2d75b2bc 1873 ix = priv->params.indirection_rqt[ix];
1da36696
TT
1874 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1875 priv->channel[ix]->rq.rqn :
1876 priv->drop_rq.rqn;
1877 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
936896e9
AS
1878 }
1879}
1880
1da36696
TT
1881static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
1882 int ix)
4cbeaff5 1883{
1da36696
TT
1884 u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1885 priv->channel[ix]->rq.rqn :
1886 priv->drop_rq.rqn;
4cbeaff5 1887
1da36696 1888 MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
4cbeaff5
AS
1889}
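/* Side note: both fill helpers above fall back to the drop RQ's RQN
 * whenever the interface is not in the OPENED state, so an RQT entry
 * always points at a valid RQ even while the channels are closed.
 */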
1890
398f3351
HHZ
1891static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
1892 int ix, struct mlx5e_rqt *rqt)
f62b8bb8
AV
1893{
1894 struct mlx5_core_dev *mdev = priv->mdev;
f62b8bb8
AV
1895 void *rqtc;
1896 int inlen;
1897 int err;
1da36696 1898 u32 *in;
f62b8bb8 1899
f62b8bb8
AV
1900 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1901 in = mlx5_vzalloc(inlen);
1902 if (!in)
1903 return -ENOMEM;
1904
1905 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1906
1907 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1908 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
1909
1da36696
TT
1910 if (sz > 1) /* RSS */
1911 mlx5e_fill_indir_rqt_rqns(priv, rqtc);
1912 else
1913 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
2be6967c 1914
398f3351
HHZ
1915 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
1916 if (!err)
1917 rqt->enabled = true;
f62b8bb8
AV
1918
1919 kvfree(in);
1da36696
TT
1920 return err;
1921}
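/* Side note: the RQT input buffer is the create_rqt_in header followed by
 * 'sz' 32-bit RQNs, hence the inlen computation above. rqt_actual_size
 * and rqt_max_size are both set to 'sz' because the table is not resized
 * afterwards; modify_rqt only rewrites the RQN list.
 */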
1922
cb67b832 1923void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
1da36696 1924{
398f3351
HHZ
1925 rqt->enabled = false;
1926 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
1da36696
TT
1927}
1928
6bfd390b
HHZ
1929static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
1930{
1931 struct mlx5e_rqt *rqt = &priv->indir_rqt;
1932
1933 return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
1934}
1935
cb67b832 1936int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
1da36696 1937{
398f3351 1938 struct mlx5e_rqt *rqt;
1da36696
TT
1939 int err;
1940 int ix;
1941
6bfd390b 1942 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
398f3351
HHZ
1943 rqt = &priv->direct_tir[ix].rqt;
1944 err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
1da36696
TT
1945 if (err)
1946 goto err_destroy_rqts;
1947 }
1948
1949 return 0;
1950
1951err_destroy_rqts:
1952 for (ix--; ix >= 0; ix--)
398f3351 1953 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
1da36696 1954
f62b8bb8
AV
1955 return err;
1956}
1957
1da36696 1958int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
5c50368f
AS
1959{
1960 struct mlx5_core_dev *mdev = priv->mdev;
5c50368f
AS
1961 void *rqtc;
1962 int inlen;
1da36696 1963 u32 *in;
5c50368f
AS
1964 int err;
1965
5c50368f
AS
1966 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1967 in = mlx5_vzalloc(inlen);
1968 if (!in)
1969 return -ENOMEM;
1970
1971 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
1972
1973 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1da36696
TT
1974 if (sz > 1) /* RSS */
1975 mlx5e_fill_indir_rqt_rqns(priv, rqtc);
1976 else
1977 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
5c50368f
AS
1978
1979 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
1980
1da36696 1981 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
5c50368f
AS
1982
1983 kvfree(in);
1984
1985 return err;
1986}
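/* Side note: mlx5e_redirect_rqt() rewrites a live RQT in place
 * (bitmask.rqn_list selects the RQN-list field for modification), which
 * lets the open/close paths swap between real channel RQs and the drop RQ
 * without recreating the TIRs that reference the table.
 */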
1987
40ab6a6e
AS
1988static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
1989{
1da36696
TT
1990 u32 rqtn;
1991 int ix;
1992
398f3351
HHZ
1993 if (priv->indir_rqt.enabled) {
1994 rqtn = priv->indir_rqt.rqtn;
1995 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1996 }
1997
1da36696 1998 for (ix = 0; ix < priv->params.num_channels; ix++) {
398f3351
HHZ
1999 if (!priv->direct_tir[ix].rqt.enabled)
2000 continue;
2001 rqtn = priv->direct_tir[ix].rqt.rqtn;
1da36696
TT
2002 mlx5e_redirect_rqt(priv, rqtn, 1, ix);
2003 }
40ab6a6e
AS
2004}
2005
5c50368f
AS
2006static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
2007{
2008 if (!priv->params.lro_en)
2009 return;
2010
2011#define ROUGH_MAX_L2_L3_HDR_SZ 256
2012
2013 MLX5_SET(tirc, tirc, lro_enable_mask,
2014 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2015 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2016 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2017 (priv->params.lro_wqe_sz -
2018 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2b029556 2019 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
5c50368f
AS
2020}
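/* Side note: lro_max_ip_payload_size appears to be expressed in 256-byte
 * units, hence the '>> 8' above after reserving ROUGH_MAX_L2_L3_HDR_SZ
 * bytes for the L2/L3 headers. E.g. a 64KB LRO WQE would yield roughly
 * (65536 - 256) >> 8 = 255 units, i.e. about 64KB of aggregated payload.
 */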
2021
bdfc028d
TT
2022void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
2023{
2024 MLX5_SET(tirc, tirc, rx_hash_fn,
2025 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
2026 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
2027 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2028 rx_hash_toeplitz_key);
2029 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2030 rx_hash_toeplitz_key);
2031
2032 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2033 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
2034 }
2035}
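/* Side note: Toeplitz hashing needs the 40-byte key copied above and is
 * made symmetric (rx_hash_symmetric) so that both directions of a flow
 * hash to the same queue; the inverted-XOR8 function needs no key at all.
 */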
2036
ab0394fe 2037static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
5c50368f
AS
2038{
2039 struct mlx5_core_dev *mdev = priv->mdev;
2040
2041 void *in;
2042 void *tirc;
2043 int inlen;
2044 int err;
ab0394fe 2045 int tt;
1da36696 2046 int ix;
5c50368f
AS
2047
2048 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2049 in = mlx5_vzalloc(inlen);
2050 if (!in)
2051 return -ENOMEM;
2052
2053 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2054 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2055
2056 mlx5e_build_tir_ctx_lro(tirc, priv);
2057
1da36696 2058 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
724b2aa1 2059 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
1da36696 2060 inlen);
ab0394fe 2061 if (err)
1da36696 2062 goto free_in;
ab0394fe 2063 }
5c50368f 2064
6bfd390b 2065 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
1da36696
TT
2066 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2067 in, inlen);
2068 if (err)
2069 goto free_in;
2070 }
2071
2072free_in:
5c50368f
AS
2073 kvfree(in);
2074
2075 return err;
2076}
2077
cd255eff 2078static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
40ab6a6e 2079{
40ab6a6e 2080 struct mlx5_core_dev *mdev = priv->mdev;
cd255eff 2081 u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
40ab6a6e
AS
2082 int err;
2083
cd255eff 2084 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
40ab6a6e
AS
2085 if (err)
2086 return err;
2087
cd255eff
SM
2088 /* Update vport context MTU */
2089 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2090 return 0;
2091}
40ab6a6e 2092
cd255eff
SM
2093static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
2094{
2095 struct mlx5_core_dev *mdev = priv->mdev;
2096 u16 hw_mtu = 0;
2097 int err;
40ab6a6e 2098
cd255eff
SM
2099 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2100 if (err || !hw_mtu) /* fallback to port oper mtu */
2101 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2102
2103 *mtu = MLX5E_HW2SW_MTU(hw_mtu);
2104}
2105
2106static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
2107{
2108 struct mlx5e_priv *priv = netdev_priv(netdev);
2109 u16 mtu;
2110 int err;
2111
2112 err = mlx5e_set_mtu(priv, netdev->mtu);
2113 if (err)
2114 return err;
40ab6a6e 2115
cd255eff
SM
2116 mlx5e_query_mtu(priv, &mtu);
2117 if (mtu != netdev->mtu)
2118 netdev_warn(netdev, "%s: VPort MTU %d is different from netdev MTU %d\n",
2119 __func__, mtu, netdev->mtu);
40ab6a6e 2120
cd255eff 2121 netdev->mtu = mtu;
40ab6a6e
AS
2122 return 0;
2123}
2124
08fb1dac
SM
2125static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2126{
2127 struct mlx5e_priv *priv = netdev_priv(netdev);
2128 int nch = priv->params.num_channels;
2129 int ntc = priv->params.num_tc;
2130 int tc;
2131
2132 netdev_reset_tc(netdev);
2133
2134 if (ntc == 1)
2135 return;
2136
2137 netdev_set_num_tc(netdev, ntc);
2138
7ccdd084
RS
2139 /* Map all netdev TCs to TXQ offset 0;
2140 * we have our own UP-to-TXQ mapping for QoS.
2141 */
08fb1dac 2142 for (tc = 0; tc < ntc; tc++)
7ccdd084 2143 netdev_set_tc_queue(netdev, tc, nch, 0);
08fb1dac
SM
2144}
2145
40ab6a6e
AS
2146int mlx5e_open_locked(struct net_device *netdev)
2147{
2148 struct mlx5e_priv *priv = netdev_priv(netdev);
cb67b832 2149 struct mlx5_core_dev *mdev = priv->mdev;
40ab6a6e
AS
2150 int num_txqs;
2151 int err;
2152
2153 set_bit(MLX5E_STATE_OPENED, &priv->state);
2154
08fb1dac
SM
2155 mlx5e_netdev_set_tcs(netdev);
2156
40ab6a6e
AS
2157 num_txqs = priv->params.num_channels * priv->params.num_tc;
2158 netif_set_real_num_tx_queues(netdev, num_txqs);
2159 netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
2160
40ab6a6e
AS
2161 err = mlx5e_open_channels(priv);
2162 if (err) {
2163 netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
2164 __func__, err);
343b29f3 2165 goto err_clear_state_opened_flag;
40ab6a6e
AS
2166 }
2167
0952da79 2168 err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
66189961
TT
2169 if (err) {
2170 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback failed, %d\n",
2171 __func__, err);
2172 goto err_close_channels;
2173 }
2174
40ab6a6e 2175 mlx5e_redirect_rqts(priv);
ce89ef36 2176 mlx5e_update_carrier(priv);
ef9814de 2177 mlx5e_timestamp_init(priv);
5a7b27eb
MG
2178#ifdef CONFIG_RFS_ACCEL
2179 priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
2180#endif
cb67b832
HHZ
2181 if (priv->profile->update_stats)
2182 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
40ab6a6e 2183
cb67b832
HHZ
2184 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
2185 err = mlx5e_add_sqs_fwd_rules(priv);
2186 if (err)
2187 goto err_close_channels;
2188 }
9b37b07f 2189 return 0;
343b29f3 2190
66189961
TT
2191err_close_channels:
2192 mlx5e_close_channels(priv);
343b29f3
AS
2193err_clear_state_opened_flag:
2194 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2195 return err;
40ab6a6e
AS
2196}
2197
cb67b832 2198int mlx5e_open(struct net_device *netdev)
40ab6a6e
AS
2199{
2200 struct mlx5e_priv *priv = netdev_priv(netdev);
2201 int err;
2202
2203 mutex_lock(&priv->state_lock);
2204 err = mlx5e_open_locked(netdev);
2205 mutex_unlock(&priv->state_lock);
2206
2207 return err;
2208}
2209
2210int mlx5e_close_locked(struct net_device *netdev)
2211{
2212 struct mlx5e_priv *priv = netdev_priv(netdev);
cb67b832 2213 struct mlx5_core_dev *mdev = priv->mdev;
40ab6a6e 2214
a1985740
AS
2215 /* The state may already be CLOSED if a previous configuration operation
2216 * (e.g. an RX/TX queue size change) that involves close & open failed.
2217 */
2218 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2219 return 0;
2220
40ab6a6e
AS
2221 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2222
cb67b832
HHZ
2223 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2224 mlx5e_remove_sqs_fwd_rules(priv);
2225
ef9814de 2226 mlx5e_timestamp_cleanup(priv);
40ab6a6e 2227 netif_carrier_off(priv->netdev);
ce89ef36 2228 mlx5e_redirect_rqts(priv);
40ab6a6e
AS
2229 mlx5e_close_channels(priv);
2230
2231 return 0;
2232}
2233
cb67b832 2234int mlx5e_close(struct net_device *netdev)
40ab6a6e
AS
2235{
2236 struct mlx5e_priv *priv = netdev_priv(netdev);
2237 int err;
2238
26e59d80
MHY
2239 if (!netif_device_present(netdev))
2240 return -ENODEV;
2241
40ab6a6e
AS
2242 mutex_lock(&priv->state_lock);
2243 err = mlx5e_close_locked(netdev);
2244 mutex_unlock(&priv->state_lock);
2245
2246 return err;
2247}
2248
2249static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
2250 struct mlx5e_rq *rq,
2251 struct mlx5e_rq_param *param)
2252{
2253 struct mlx5_core_dev *mdev = priv->mdev;
2254 void *rqc = param->rqc;
2255 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2256 int err;
2257
2258 param->wq.db_numa_node = param->wq.buf_numa_node;
2259
2260 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2261 &rq->wq_ctrl);
2262 if (err)
2263 return err;
2264
2265 rq->priv = priv;
2266
2267 return 0;
2268}
2269
2270static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
2271 struct mlx5e_cq *cq,
2272 struct mlx5e_cq_param *param)
2273{
2274 struct mlx5_core_dev *mdev = priv->mdev;
2275 struct mlx5_core_cq *mcq = &cq->mcq;
2276 int eqn_not_used;
0b6e26ce 2277 unsigned int irqn;
40ab6a6e
AS
2278 int err;
2279
2280 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
2281 &cq->wq_ctrl);
2282 if (err)
2283 return err;
2284
2285 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
2286
2287 mcq->cqe_sz = 64;
2288 mcq->set_ci_db = cq->wq_ctrl.db.db;
2289 mcq->arm_db = cq->wq_ctrl.db.db + 1;
2290 *mcq->set_ci_db = 0;
2291 *mcq->arm_db = 0;
2292 mcq->vector = param->eq_ix;
2293 mcq->comp = mlx5e_completion_event;
2294 mcq->event = mlx5e_cq_error_event;
2295 mcq->irqn = irqn;
40ab6a6e
AS
2296
2297 cq->priv = priv;
2298
2299 return 0;
2300}
2301
2302static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
2303{
2304 struct mlx5e_cq_param cq_param;
2305 struct mlx5e_rq_param rq_param;
2306 struct mlx5e_rq *rq = &priv->drop_rq;
2307 struct mlx5e_cq *cq = &priv->drop_rq.cq;
2308 int err;
2309
2310 memset(&cq_param, 0, sizeof(cq_param));
2311 memset(&rq_param, 0, sizeof(rq_param));
556dd1b9 2312 mlx5e_build_drop_rq_param(&rq_param);
40ab6a6e
AS
2313
2314 err = mlx5e_create_drop_cq(priv, cq, &cq_param);
2315 if (err)
2316 return err;
2317
2318 err = mlx5e_enable_cq(cq, &cq_param);
2319 if (err)
2320 goto err_destroy_cq;
2321
2322 err = mlx5e_create_drop_rq(priv, rq, &rq_param);
2323 if (err)
2324 goto err_disable_cq;
2325
2326 err = mlx5e_enable_rq(rq, &rq_param);
2327 if (err)
2328 goto err_destroy_rq;
2329
2330 return 0;
2331
2332err_destroy_rq:
2333 mlx5e_destroy_rq(&priv->drop_rq);
2334
2335err_disable_cq:
2336 mlx5e_disable_cq(&priv->drop_rq.cq);
2337
2338err_destroy_cq:
2339 mlx5e_destroy_cq(&priv->drop_rq.cq);
2340
2341 return err;
2342}
2343
2344static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
2345{
2346 mlx5e_disable_rq(&priv->drop_rq);
2347 mlx5e_destroy_rq(&priv->drop_rq);
2348 mlx5e_disable_cq(&priv->drop_rq.cq);
2349 mlx5e_destroy_cq(&priv->drop_rq.cq);
2350}
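/* Side note: the drop RQ is a minimal RQ to which no receive buffers are
 * ever posted; it exists so that RQTs (and therefore TIRs) have a valid
 * RQN to point at while the netdev is closed.
 */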
2351
2352static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
2353{
2354 struct mlx5_core_dev *mdev = priv->mdev;
c4f287c4 2355 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
40ab6a6e
AS
2356 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2357
08fb1dac 2358 MLX5_SET(tisc, tisc, prio, tc << 1);
b50d292b 2359 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
db60b802
AH
2360
2361 if (mlx5_lag_is_lacp_owner(mdev))
2362 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2363
40ab6a6e
AS
2364 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
2365}
2366
2367static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
2368{
2369 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2370}
2371
cb67b832 2372int mlx5e_create_tises(struct mlx5e_priv *priv)
40ab6a6e
AS
2373{
2374 int err;
2375 int tc;
2376
6bfd390b 2377 for (tc = 0; tc < priv->profile->max_tc; tc++) {
40ab6a6e
AS
2378 err = mlx5e_create_tis(priv, tc);
2379 if (err)
2380 goto err_close_tises;
2381 }
2382
2383 return 0;
2384
2385err_close_tises:
2386 for (tc--; tc >= 0; tc--)
2387 mlx5e_destroy_tis(priv, tc);
2388
2389 return err;
2390}
2391
cb67b832 2392void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
40ab6a6e
AS
2393{
2394 int tc;
2395
6bfd390b 2396 for (tc = 0; tc < priv->profile->max_tc; tc++)
40ab6a6e
AS
2397 mlx5e_destroy_tis(priv, tc);
2398}
2399
1da36696
TT
2400static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2401 enum mlx5e_traffic_types tt)
f62b8bb8
AV
2402{
2403 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2404
b50d292b 2405 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3191e05f 2406
5a6f8aef
AS
2407#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2408 MLX5_HASH_FIELD_SEL_DST_IP)
f62b8bb8 2409
5a6f8aef
AS
2410#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2411 MLX5_HASH_FIELD_SEL_DST_IP |\
2412 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2413 MLX5_HASH_FIELD_SEL_L4_DPORT)
f62b8bb8 2414
a741749f
AS
2415#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2416 MLX5_HASH_FIELD_SEL_DST_IP |\
2417 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2418
5c50368f 2419 mlx5e_build_tir_ctx_lro(tirc, priv);
f62b8bb8 2420
4cbeaff5 2421 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
398f3351 2422 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
1da36696 2423 mlx5e_build_tir_ctx_hash(tirc, priv);
f62b8bb8
AV
2424
2425 switch (tt) {
2426 case MLX5E_TT_IPV4_TCP:
2427 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2428 MLX5_L3_PROT_TYPE_IPV4);
2429 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2430 MLX5_L4_PROT_TYPE_TCP);
2431 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
5a6f8aef 2432 MLX5_HASH_IP_L4PORTS);
f62b8bb8
AV
2433 break;
2434
2435 case MLX5E_TT_IPV6_TCP:
2436 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2437 MLX5_L3_PROT_TYPE_IPV6);
2438 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2439 MLX5_L4_PROT_TYPE_TCP);
2440 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
5a6f8aef 2441 MLX5_HASH_IP_L4PORTS);
f62b8bb8
AV
2442 break;
2443
2444 case MLX5E_TT_IPV4_UDP:
2445 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2446 MLX5_L3_PROT_TYPE_IPV4);
2447 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2448 MLX5_L4_PROT_TYPE_UDP);
2449 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
5a6f8aef 2450 MLX5_HASH_IP_L4PORTS);
f62b8bb8
AV
2451 break;
2452
2453 case MLX5E_TT_IPV6_UDP:
2454 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2455 MLX5_L3_PROT_TYPE_IPV6);
2456 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2457 MLX5_L4_PROT_TYPE_UDP);
2458 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
5a6f8aef 2459 MLX5_HASH_IP_L4PORTS);
f62b8bb8
AV
2460 break;
2461
a741749f
AS
2462 case MLX5E_TT_IPV4_IPSEC_AH:
2463 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2464 MLX5_L3_PROT_TYPE_IPV4);
2465 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2466 MLX5_HASH_IP_IPSEC_SPI);
2467 break;
2468
2469 case MLX5E_TT_IPV6_IPSEC_AH:
2470 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2471 MLX5_L3_PROT_TYPE_IPV6);
2472 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2473 MLX5_HASH_IP_IPSEC_SPI);
2474 break;
2475
2476 case MLX5E_TT_IPV4_IPSEC_ESP:
2477 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2478 MLX5_L3_PROT_TYPE_IPV4);
2479 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2480 MLX5_HASH_IP_IPSEC_SPI);
2481 break;
2482
2483 case MLX5E_TT_IPV6_IPSEC_ESP:
2484 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2485 MLX5_L3_PROT_TYPE_IPV6);
2486 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2487 MLX5_HASH_IP_IPSEC_SPI);
2488 break;
2489
f62b8bb8
AV
2490 case MLX5E_TT_IPV4:
2491 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2492 MLX5_L3_PROT_TYPE_IPV4);
2493 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2494 MLX5_HASH_IP);
2495 break;
2496
2497 case MLX5E_TT_IPV6:
2498 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2499 MLX5_L3_PROT_TYPE_IPV6);
2500 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2501 MLX5_HASH_IP);
2502 break;
1da36696
TT
2503 default:
2504 WARN_ONCE(true,
2505 "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
f62b8bb8
AV
2506 }
2507}
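/* Side note: one indirect TIR is built per traffic type: the TCP/UDP
 * types hash on the IP/port 4-tuple, the IPsec AH/ESP types on the
 * address pair plus SPI, and plain IPv4/IPv6 on the address pair only.
 */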
2508
1da36696
TT
2509static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2510 u32 rqtn)
f62b8bb8 2511{
b50d292b 2512 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
1da36696
TT
2513
2514 mlx5e_build_tir_ctx_lro(tirc, priv);
2515
2516 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2517 MLX5_SET(tirc, tirc, indirect_table, rqtn);
2518 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2519}
2520
6bfd390b 2521static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
1da36696 2522{
724b2aa1 2523 struct mlx5e_tir *tir;
f62b8bb8
AV
2524 void *tirc;
2525 int inlen;
2526 int err;
1da36696 2527 u32 *in;
1da36696 2528 int tt;
f62b8bb8
AV
2529
2530 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2531 in = mlx5_vzalloc(inlen);
2532 if (!in)
2533 return -ENOMEM;
2534
1da36696
TT
2535 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2536 memset(in, 0, inlen);
724b2aa1 2537 tir = &priv->indir_tir[tt];
1da36696
TT
2538 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2539 mlx5e_build_indir_tir_ctx(priv, tirc, tt);
724b2aa1 2540 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
f62b8bb8 2541 if (err)
40ab6a6e 2542 goto err_destroy_tirs;
f62b8bb8
AV
2543 }
2544
6bfd390b
HHZ
2545 kvfree(in);
2546
2547 return 0;
2548
2549err_destroy_tirs:
2550 for (tt--; tt >= 0; tt--)
2551 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2552
2553 kvfree(in);
2554
2555 return err;
2556}
2557
cb67b832 2558int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
6bfd390b
HHZ
2559{
2560 int nch = priv->profile->max_nch(priv->mdev);
2561 struct mlx5e_tir *tir;
2562 void *tirc;
2563 int inlen;
2564 int err;
2565 u32 *in;
2566 int ix;
2567
2568 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2569 in = mlx5_vzalloc(inlen);
2570 if (!in)
2571 return -ENOMEM;
2572
1da36696
TT
2573 for (ix = 0; ix < nch; ix++) {
2574 memset(in, 0, inlen);
724b2aa1 2575 tir = &priv->direct_tir[ix];
1da36696
TT
2576 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2577 mlx5e_build_direct_tir_ctx(priv, tirc,
398f3351 2578 priv->direct_tir[ix].rqt.rqtn);
724b2aa1 2579 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
1da36696
TT
2580 if (err)
2581 goto err_destroy_ch_tirs;
2582 }
2583
2584 kvfree(in);
2585
f62b8bb8
AV
2586 return 0;
2587
1da36696
TT
2588err_destroy_ch_tirs:
2589 for (ix--; ix >= 0; ix--)
724b2aa1 2590 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
1da36696 2591
1da36696 2592 kvfree(in);
f62b8bb8
AV
2593
2594 return err;
2595}
2596
6bfd390b 2597static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
f62b8bb8
AV
2598{
2599 int i;
2600
1da36696 2601 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
724b2aa1 2602 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
f62b8bb8
AV
2603}
2604
cb67b832 2605void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
6bfd390b
HHZ
2606{
2607 int nch = priv->profile->max_nch(priv->mdev);
2608 int i;
2609
2610 for (i = 0; i < nch; i++)
2611 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
2612}
2613
36350114
GP
2614int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
2615{
2616 int err = 0;
2617 int i;
2618
2619 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2620 return 0;
2621
2622 for (i = 0; i < priv->params.num_channels; i++) {
2623 err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
2624 if (err)
2625 return err;
2626 }
2627
2628 return 0;
2629}
2630
08fb1dac
SM
2631static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2632{
2633 struct mlx5e_priv *priv = netdev_priv(netdev);
2634 bool was_opened;
2635 int err = 0;
2636
2637 if (tc && tc != MLX5E_MAX_NUM_TC)
2638 return -EINVAL;
2639
2640 mutex_lock(&priv->state_lock);
2641
2642 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2643 if (was_opened)
2644 mlx5e_close_locked(priv->netdev);
2645
2646 priv->params.num_tc = tc ? tc : 1;
2647
2648 if (was_opened)
2649 err = mlx5e_open_locked(priv->netdev);
2650
2651 mutex_unlock(&priv->state_lock);
2652
2653 return err;
2654}
2655
2656static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
2657 __be16 proto, struct tc_to_netdev *tc)
2658{
e8f887ac
AV
2659 struct mlx5e_priv *priv = netdev_priv(dev);
2660
2661 if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
2662 goto mqprio;
2663
2664 switch (tc->type) {
e3a2b7ed
AV
2665 case TC_SETUP_CLSFLOWER:
2666 switch (tc->cls_flower->command) {
2667 case TC_CLSFLOWER_REPLACE:
2668 return mlx5e_configure_flower(priv, proto, tc->cls_flower);
2669 case TC_CLSFLOWER_DESTROY:
2670 return mlx5e_delete_flower(priv, tc->cls_flower);
aad7e08d
AV
2671 case TC_CLSFLOWER_STATS:
2672 return mlx5e_stats_flower(priv, tc->cls_flower);
e3a2b7ed 2673 }
e8f887ac
AV
2674 default:
2675 return -EOPNOTSUPP;
2676 }
2677
2678mqprio:
67ba422e 2679 if (tc->type != TC_SETUP_MQPRIO)
08fb1dac
SM
2680 return -EINVAL;
2681
2682 return mlx5e_setup_tc(dev, tc->tc);
2683}
2684
bc1f4470 2685static void
f62b8bb8
AV
2686mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
2687{
2688 struct mlx5e_priv *priv = netdev_priv(dev);
9218b44d 2689 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
f62b8bb8 2690 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
269e6b3a 2691 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
f62b8bb8 2692
370bad0f
OG
2693 if (mlx5e_is_uplink_rep(priv)) {
2694 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
2695 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
2696 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
2697 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
2698 } else {
2699 stats->rx_packets = sstats->rx_packets;
2700 stats->rx_bytes = sstats->rx_bytes;
2701 stats->tx_packets = sstats->tx_packets;
2702 stats->tx_bytes = sstats->tx_bytes;
2703 stats->tx_dropped = sstats->tx_queue_dropped;
2704 }
269e6b3a
GP
2705
2706 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
269e6b3a
GP
2707
2708 stats->rx_length_errors =
9218b44d
GP
2709 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
2710 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
2711 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
269e6b3a 2712 stats->rx_crc_errors =
9218b44d
GP
2713 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
2714 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
2715 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
269e6b3a 2716 stats->tx_carrier_errors =
9218b44d 2717 PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
269e6b3a
GP
2718 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
2719 stats->rx_frame_errors;
2720 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
2721
2722 /* The vport multicast counter also counts packets that are dropped
2723 * due to steering or to RX out-of-buffer conditions.
2724 */
9218b44d
GP
2725 stats->multicast =
2726 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
f62b8bb8 2727
f62b8bb8
AV
2728}
2729
2730static void mlx5e_set_rx_mode(struct net_device *dev)
2731{
2732 struct mlx5e_priv *priv = netdev_priv(dev);
2733
7bb29755 2734 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
2735}
2736
2737static int mlx5e_set_mac(struct net_device *netdev, void *addr)
2738{
2739 struct mlx5e_priv *priv = netdev_priv(netdev);
2740 struct sockaddr *saddr = addr;
2741
2742 if (!is_valid_ether_addr(saddr->sa_data))
2743 return -EADDRNOTAVAIL;
2744
2745 netif_addr_lock_bh(netdev);
2746 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
2747 netif_addr_unlock_bh(netdev);
2748
7bb29755 2749 queue_work(priv->wq, &priv->set_rx_mode_work);
f62b8bb8
AV
2750
2751 return 0;
2752}
2753
0e405443
GP
2754#define MLX5E_SET_FEATURE(netdev, feature, enable) \
2755 do { \
2756 if (enable) \
2757 netdev->features |= feature; \
2758 else \
2759 netdev->features &= ~feature; \
2760 } while (0)
2761
2762typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
2763
2764static int set_feature_lro(struct net_device *netdev, bool enable)
f62b8bb8
AV
2765{
2766 struct mlx5e_priv *priv = netdev_priv(netdev);
0e405443
GP
2767 bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2768 int err;
f62b8bb8
AV
2769
2770 mutex_lock(&priv->state_lock);
f62b8bb8 2771
0e405443
GP
2772 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2773 mlx5e_close_locked(priv->netdev);
98e81b0a 2774
0e405443
GP
2775 priv->params.lro_en = enable;
2776 err = mlx5e_modify_tirs_lro(priv);
2777 if (err) {
2778 netdev_err(netdev, "lro modify failed, %d\n", err);
2779 priv->params.lro_en = !enable;
98e81b0a 2780 }
f62b8bb8 2781
0e405443
GP
2782 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2783 mlx5e_open_locked(priv->netdev);
2784
9b37b07f
AS
2785 mutex_unlock(&priv->state_lock);
2786
0e405443
GP
2787 return err;
2788}
2789
2790static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
2791{
2792 struct mlx5e_priv *priv = netdev_priv(netdev);
2793
2794 if (enable)
2795 mlx5e_enable_vlan_filter(priv);
2796 else
2797 mlx5e_disable_vlan_filter(priv);
2798
2799 return 0;
2800}
2801
2802static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
2803{
2804 struct mlx5e_priv *priv = netdev_priv(netdev);
f62b8bb8 2805
0e405443 2806 if (!enable && mlx5e_tc_num_filters(priv)) {
e8f887ac
AV
2807 netdev_err(netdev,
2808 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
2809 return -EINVAL;
2810 }
2811
0e405443
GP
2812 return 0;
2813}
2814
94cb1ebb
EBE
2815static int set_feature_rx_all(struct net_device *netdev, bool enable)
2816{
2817 struct mlx5e_priv *priv = netdev_priv(netdev);
2818 struct mlx5_core_dev *mdev = priv->mdev;
2819
2820 return mlx5_set_port_fcs(mdev, !enable);
2821}
2822
36350114
GP
2823static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
2824{
2825 struct mlx5e_priv *priv = netdev_priv(netdev);
2826 int err;
2827
2828 mutex_lock(&priv->state_lock);
2829
2830 priv->params.vlan_strip_disable = !enable;
2831 err = mlx5e_modify_rqs_vsd(priv, !enable);
2832 if (err)
2833 priv->params.vlan_strip_disable = enable;
2834
2835 mutex_unlock(&priv->state_lock);
2836
2837 return err;
2838}
2839
45bf454a
MG
2840#ifdef CONFIG_RFS_ACCEL
2841static int set_feature_arfs(struct net_device *netdev, bool enable)
2842{
2843 struct mlx5e_priv *priv = netdev_priv(netdev);
2844 int err;
2845
2846 if (enable)
2847 err = mlx5e_arfs_enable(priv);
2848 else
2849 err = mlx5e_arfs_disable(priv);
2850
2851 return err;
2852}
2853#endif
2854
0e405443
GP
2855static int mlx5e_handle_feature(struct net_device *netdev,
2856 netdev_features_t wanted_features,
2857 netdev_features_t feature,
2858 mlx5e_feature_handler feature_handler)
2859{
2860 netdev_features_t changes = wanted_features ^ netdev->features;
2861 bool enable = !!(wanted_features & feature);
2862 int err;
2863
2864 if (!(changes & feature))
2865 return 0;
2866
2867 err = feature_handler(netdev, enable);
2868 if (err) {
2869 netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
2870 enable ? "Enable" : "Disable", feature, err);
2871 return err;
2872 }
2873
2874 MLX5E_SET_FEATURE(netdev, feature, enable);
2875 return 0;
2876}
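/* Side note: mlx5e_handle_feature() flips the hardware state first and
 * only mirrors the bit into netdev->features when the handler succeeds,
 * so the advertised feature set does not drift from the device state.
 */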
2877
2878static int mlx5e_set_features(struct net_device *netdev,
2879 netdev_features_t features)
2880{
2881 int err;
2882
2883 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
2884 set_feature_lro);
2885 err |= mlx5e_handle_feature(netdev, features,
2886 NETIF_F_HW_VLAN_CTAG_FILTER,
2887 set_feature_vlan_filter);
2888 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
2889 set_feature_tc_num_filters);
94cb1ebb
EBE
2890 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
2891 set_feature_rx_all);
36350114
GP
2892 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
2893 set_feature_rx_vlan);
45bf454a
MG
2894#ifdef CONFIG_RFS_ACCEL
2895 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
2896 set_feature_arfs);
2897#endif
0e405443
GP
2898
2899 return err ? -EINVAL : 0;
f62b8bb8
AV
2900}
2901
2902static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2903{
2904 struct mlx5e_priv *priv = netdev_priv(netdev);
98e81b0a 2905 bool was_opened;
98e81b0a 2906 int err = 0;
506753b0 2907 bool reset;
f62b8bb8 2908
f62b8bb8 2909 mutex_lock(&priv->state_lock);
98e81b0a 2910
506753b0
TT
2911 reset = !priv->params.lro_en &&
2912 (priv->params.rq_wq_type !=
2913 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
2914
98e81b0a 2915 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
506753b0 2916 if (was_opened && reset)
98e81b0a
AS
2917 mlx5e_close_locked(netdev);
2918
f62b8bb8 2919 netdev->mtu = new_mtu;
13f9bba7 2920 mlx5e_set_dev_port_mtu(netdev);
98e81b0a 2921
506753b0 2922 if (was_opened && reset)
98e81b0a
AS
2923 err = mlx5e_open_locked(netdev);
2924
f62b8bb8
AV
2925 mutex_unlock(&priv->state_lock);
2926
2927 return err;
2928}
2929
ef9814de
EBE
2930static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2931{
2932 switch (cmd) {
2933 case SIOCSHWTSTAMP:
2934 return mlx5e_hwstamp_set(dev, ifr);
2935 case SIOCGHWTSTAMP:
2936 return mlx5e_hwstamp_get(dev, ifr);
2937 default:
2938 return -EOPNOTSUPP;
2939 }
2940}
2941
66e49ded
SM
2942static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2943{
2944 struct mlx5e_priv *priv = netdev_priv(dev);
2945 struct mlx5_core_dev *mdev = priv->mdev;
2946
2947 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
2948}
2949
79aab093
MS
2950static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2951 __be16 vlan_proto)
66e49ded
SM
2952{
2953 struct mlx5e_priv *priv = netdev_priv(dev);
2954 struct mlx5_core_dev *mdev = priv->mdev;
2955
79aab093
MS
2956 if (vlan_proto != htons(ETH_P_8021Q))
2957 return -EPROTONOSUPPORT;
2958
66e49ded
SM
2959 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
2960 vlan, qos);
2961}
2962
f942380c
MHY
2963static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2964{
2965 struct mlx5e_priv *priv = netdev_priv(dev);
2966 struct mlx5_core_dev *mdev = priv->mdev;
2967
2968 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
2969}
2970
1edc57e2
MHY
2971static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
2972{
2973 struct mlx5e_priv *priv = netdev_priv(dev);
2974 struct mlx5_core_dev *mdev = priv->mdev;
2975
2976 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
2977}
bd77bf1c
MHY
2978
2979static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2980 int max_tx_rate)
2981{
2982 struct mlx5e_priv *priv = netdev_priv(dev);
2983 struct mlx5_core_dev *mdev = priv->mdev;
2984
2985 if (min_tx_rate)
2986 return -EOPNOTSUPP;
2987
2988 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
2989 max_tx_rate);
2990}
2991
66e49ded
SM
2992static int mlx5_vport_link2ifla(u8 esw_link)
2993{
2994 switch (esw_link) {
2995 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
2996 return IFLA_VF_LINK_STATE_DISABLE;
2997 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
2998 return IFLA_VF_LINK_STATE_ENABLE;
2999 }
3000 return IFLA_VF_LINK_STATE_AUTO;
3001}
3002
3003static int mlx5_ifla_link2vport(u8 ifla_link)
3004{
3005 switch (ifla_link) {
3006 case IFLA_VF_LINK_STATE_DISABLE:
3007 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3008 case IFLA_VF_LINK_STATE_ENABLE:
3009 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3010 }
3011 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3012}
3013
3014static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3015 int link_state)
3016{
3017 struct mlx5e_priv *priv = netdev_priv(dev);
3018 struct mlx5_core_dev *mdev = priv->mdev;
3019
3020 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3021 mlx5_ifla_link2vport(link_state));
3022}
3023
3024static int mlx5e_get_vf_config(struct net_device *dev,
3025 int vf, struct ifla_vf_info *ivi)
3026{
3027 struct mlx5e_priv *priv = netdev_priv(dev);
3028 struct mlx5_core_dev *mdev = priv->mdev;
3029 int err;
3030
3031 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3032 if (err)
3033 return err;
3034 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3035 return 0;
3036}
3037
3038static int mlx5e_get_vf_stats(struct net_device *dev,
3039 int vf, struct ifla_vf_stats *vf_stats)
3040{
3041 struct mlx5e_priv *priv = netdev_priv(dev);
3042 struct mlx5_core_dev *mdev = priv->mdev;
3043
3044 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3045 vf_stats);
3046}
3047
4a25730e
HHZ
3048void mlx5e_add_vxlan_port(struct net_device *netdev,
3049 struct udp_tunnel_info *ti)
b3f63c3d
MF
3050{
3051 struct mlx5e_priv *priv = netdev_priv(netdev);
3052
974c3f30
AD
3053 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3054 return;
3055
b3f63c3d
MF
3056 if (!mlx5e_vxlan_allowed(priv->mdev))
3057 return;
3058
974c3f30 3059 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
b3f63c3d
MF
3060}
3061
4a25730e
HHZ
3062void mlx5e_del_vxlan_port(struct net_device *netdev,
3063 struct udp_tunnel_info *ti)
b3f63c3d
MF
3064{
3065 struct mlx5e_priv *priv = netdev_priv(netdev);
3066
974c3f30
AD
3067 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3068 return;
3069
b3f63c3d
MF
3070 if (!mlx5e_vxlan_allowed(priv->mdev))
3071 return;
3072
974c3f30 3073 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
b3f63c3d
MF
3074}
3075
3076static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
3077 struct sk_buff *skb,
3078 netdev_features_t features)
3079{
3080 struct udphdr *udph;
3081 u16 proto;
3082 u16 port = 0;
3083
3084 switch (vlan_get_protocol(skb)) {
3085 case htons(ETH_P_IP):
3086 proto = ip_hdr(skb)->protocol;
3087 break;
3088 case htons(ETH_P_IPV6):
3089 proto = ipv6_hdr(skb)->nexthdr;
3090 break;
3091 default:
3092 goto out;
3093 }
3094
3095 if (proto == IPPROTO_UDP) {
3096 udph = udp_hdr(skb);
3097 port = be16_to_cpu(udph->dest);
3098 }
3099
3100 /* Verify if UDP port is being offloaded by HW */
3101 if (port && mlx5e_vxlan_lookup_port(priv, port))
3102 return features;
3103
3104out:
3105 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
3106 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3107}
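/* Side note: if the outer UDP destination port is not a VXLAN port known
 * to the hardware, checksum and GSO offloads are masked off so the stack
 * falls back to software handling for this tunneled packet.
 */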
3108
3109static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3110 struct net_device *netdev,
3111 netdev_features_t features)
3112{
3113 struct mlx5e_priv *priv = netdev_priv(netdev);
3114
3115 features = vlan_features_check(skb, features);
3116 features = vxlan_features_check(skb, features);
3117
3118 /* Check whether the tunneled packet is offloaded by the HW */
3119 if (skb->encapsulation &&
3120 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
3121 return mlx5e_vxlan_features_check(priv, skb, features);
3122
3123 return features;
3124}
3125
3947ca18
DJ
3126static void mlx5e_tx_timeout(struct net_device *dev)
3127{
3128 struct mlx5e_priv *priv = netdev_priv(dev);
3129 bool sched_work = false;
3130 int i;
3131
3132 netdev_err(dev, "TX timeout detected\n");
3133
3134 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
3135 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
3136
2c1ccc99 3137 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
3947ca18
DJ
3138 continue;
3139 sched_work = true;
c0f1147d 3140 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
3947ca18
DJ
3141 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
3142 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
3143 }
3144
3145 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
3146 schedule_work(&priv->tx_timeout_work);
3147}
3148
86994156
RS
3149static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3150{
3151 struct mlx5e_priv *priv = netdev_priv(netdev);
3152 struct bpf_prog *old_prog;
3153 int err = 0;
3154 bool reset, was_opened;
3155 int i;
3156
3157 mutex_lock(&priv->state_lock);
3158
3159 if ((netdev->features & NETIF_F_LRO) && prog) {
3160 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3161 err = -EINVAL;
3162 goto unlock;
3163 }
3164
3165 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3166 /* no need for full reset when exchanging programs */
3167 reset = (!priv->xdp_prog || !prog);
3168
3169 if (was_opened && reset)
3170 mlx5e_close_locked(netdev);
c54c0629
DB
3171 if (was_opened && !reset) {
3172 /* num_channels is invariant here, so we can take the
3173 * batched reference right upfront.
3174 */
3175 prog = bpf_prog_add(prog, priv->params.num_channels);
3176 if (IS_ERR(prog)) {
3177 err = PTR_ERR(prog);
3178 goto unlock;
3179 }
3180 }
86994156 3181
c54c0629
DB
3182 /* Exchange programs; we keep the extra prog reference from the caller
3183 * as long as we don't fail from this point onwards.
3184 */
86994156 3185 old_prog = xchg(&priv->xdp_prog, prog);
86994156
RS
3186 if (old_prog)
3187 bpf_prog_put(old_prog);
3188
3189 if (reset) /* change RQ type according to priv->xdp_prog */
3190 mlx5e_set_rq_priv_params(priv);
3191
3192 if (was_opened && reset)
3193 mlx5e_open_locked(netdev);
3194
3195 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3196 goto unlock;
3197
3198 /* Exchanging programs without a reset; update the refcounts on behalf
3199 * of the channels' RQs here.
3200 */
86994156
RS
3201 for (i = 0; i < priv->params.num_channels; i++) {
3202 struct mlx5e_channel *c = priv->channel[i];
3203
c0f1147d 3204 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
3205 napi_synchronize(&c->napi);
3206 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3207
3208 old_prog = xchg(&c->rq.xdp_prog, prog);
3209
c0f1147d 3210 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
86994156
RS
3211 /* napi_schedule in case we have missed anything */
3212 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
3213 napi_schedule(&c->napi);
3214
3215 if (old_prog)
3216 bpf_prog_put(old_prog);
3217 }
3218
3219unlock:
3220 mutex_unlock(&priv->state_lock);
3221 return err;
3222}
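/* Side note: two paths above. Installing or removing a program changes
 * the RQ type and takes a full close/open cycle, while swapping one
 * program for another only quiesces NAPI per channel and xchg()es
 * rq->xdp_prog, using the batched refcount taken upfront.
 */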
3223
3224static bool mlx5e_xdp_attached(struct net_device *dev)
3225{
3226 struct mlx5e_priv *priv = netdev_priv(dev);
3227
3228 return !!priv->xdp_prog;
3229}
3230
3231static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3232{
3233 switch (xdp->command) {
3234 case XDP_SETUP_PROG:
3235 return mlx5e_xdp_set(dev, xdp->prog);
3236 case XDP_QUERY_PROG:
3237 xdp->prog_attached = mlx5e_xdp_attached(dev);
3238 return 0;
3239 default:
3240 return -EINVAL;
3241 }
3242}
3243
80378384
CO
3244#ifdef CONFIG_NET_POLL_CONTROLLER
3245/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
3246 * re-enabling interrupts.
3247 */
3248static void mlx5e_netpoll(struct net_device *dev)
3249{
3250 struct mlx5e_priv *priv = netdev_priv(dev);
3251 int i;
3252
3253 for (i = 0; i < priv->params.num_channels; i++)
3254 napi_schedule(&priv->channel[i]->napi);
3255}
3256#endif
3257
b0eed40e 3258static const struct net_device_ops mlx5e_netdev_ops_basic = {
f62b8bb8
AV
3259 .ndo_open = mlx5e_open,
3260 .ndo_stop = mlx5e_close,
3261 .ndo_start_xmit = mlx5e_xmit,
08fb1dac
SM
3262 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3263 .ndo_select_queue = mlx5e_select_queue,
f62b8bb8
AV
3264 .ndo_get_stats64 = mlx5e_get_stats,
3265 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3266 .ndo_set_mac_address = mlx5e_set_mac,
b0eed40e
SM
3267 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3268 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
f62b8bb8 3269 .ndo_set_features = mlx5e_set_features,
b0eed40e
SM
3270 .ndo_change_mtu = mlx5e_change_mtu,
3271 .ndo_do_ioctl = mlx5e_ioctl,
507f0c81 3272 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
45bf454a
MG
3273#ifdef CONFIG_RFS_ACCEL
3274 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3275#endif
3947ca18 3276 .ndo_tx_timeout = mlx5e_tx_timeout,
86994156 3277 .ndo_xdp = mlx5e_xdp,
80378384
CO
3278#ifdef CONFIG_NET_POLL_CONTROLLER
3279 .ndo_poll_controller = mlx5e_netpoll,
3280#endif
b0eed40e
SM
3281};
3282
3283static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3284 .ndo_open = mlx5e_open,
3285 .ndo_stop = mlx5e_close,
3286 .ndo_start_xmit = mlx5e_xmit,
08fb1dac
SM
3287 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3288 .ndo_select_queue = mlx5e_select_queue,
b0eed40e
SM
3289 .ndo_get_stats64 = mlx5e_get_stats,
3290 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3291 .ndo_set_mac_address = mlx5e_set_mac,
3292 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3293 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
3294 .ndo_set_features = mlx5e_set_features,
3295 .ndo_change_mtu = mlx5e_change_mtu,
3296 .ndo_do_ioctl = mlx5e_ioctl,
974c3f30
AD
3297 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
3298 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
507f0c81 3299 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
b3f63c3d 3300 .ndo_features_check = mlx5e_features_check,
45bf454a
MG
3301#ifdef CONFIG_RFS_ACCEL
3302 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3303#endif
b0eed40e
SM
3304 .ndo_set_vf_mac = mlx5e_set_vf_mac,
3305 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
f942380c 3306 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
1edc57e2 3307 .ndo_set_vf_trust = mlx5e_set_vf_trust,
bd77bf1c 3308 .ndo_set_vf_rate = mlx5e_set_vf_rate,
b0eed40e
SM
3309 .ndo_get_vf_config = mlx5e_get_vf_config,
3310 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
3311 .ndo_get_vf_stats = mlx5e_get_vf_stats,
3947ca18 3312 .ndo_tx_timeout = mlx5e_tx_timeout,
86994156 3313 .ndo_xdp = mlx5e_xdp,
80378384
CO
3314#ifdef CONFIG_NET_POLL_CONTROLLER
3315 .ndo_poll_controller = mlx5e_netpoll,
3316#endif
370bad0f
OG
3317 .ndo_has_offload_stats = mlx5e_has_offload_stats,
3318 .ndo_get_offload_stats = mlx5e_get_offload_stats,
f62b8bb8
AV
3319};
3320
3321static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3322{
3323 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3324 return -ENOTSUPP;
3325 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3326 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3327 !MLX5_CAP_ETH(mdev, csum_cap) ||
3328 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3329 !MLX5_CAP_ETH(mdev, vlan_cap) ||
796a27ec
GP
3330 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3331 MLX5_CAP_FLOWTABLE(mdev,
3332 flow_table_properties_nic_receive.max_ft_level)
3333 < 3) {
f62b8bb8
AV
3334 mlx5_core_warn(mdev,
3335 "Not creating net device, some required device capabilities are missing\n");
3336 return -ENOTSUPP;
3337 }
66189961
TT
3338 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3339 mlx5_core_warn(mdev, "Self loopback prevention is not supported\n");
7524a5d8
GP
3340 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3341 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
66189961 3342
f62b8bb8
AV
3343 return 0;
3344}
3345
58d52291
AS
3346u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3347{
3348 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3349
3350 return bf_buf_size -
3351 sizeof(struct mlx5e_tx_wqe) +
3352 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3353}
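/* Side note: the BlueFlame register is split into two halves used in
 * alternation, which is presumably why half of it (the '/ 2') is taken
 * here; the usable inline budget is what remains after the TX WQE
 * headers, plus the 2 bytes of inline_hdr_start counted inside the WQE.
 */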
3354
d8c9660d
TT
3355void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
3356 u32 *indirection_rqt, int len,
85082dba
TT
3357 int num_channels)
3358{
d8c9660d
TT
3359 int node = mdev->priv.numa_node;
3360 int node_num_of_cores;
85082dba
TT
3361 int i;
3362
d8c9660d
TT
3363 if (node == -1)
3364 node = first_online_node;
3365
3366 node_num_of_cores = cpumask_weight(cpumask_of_node(node));
3367
3368 if (node_num_of_cores)
3369 num_channels = min_t(int, num_channels, node_num_of_cores);
3370
85082dba
TT
3371 for (i = 0; i < len; i++)
3372 indirection_rqt[i] = i % num_channels;
3373}
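/* Illustrative example: with 8 usable cores on the device's NUMA node and
 * a 128-slot table, the default mapping is simply
 * indirection_rqt[i] = i % 8, a round-robin spread over local channels.
 */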
3374
b797a684
SM
3375static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
3376{
3377 enum pcie_link_width width;
3378 enum pci_bus_speed speed;
3379 int err = 0;
3380
3381 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
3382 if (err)
3383 return err;
3384
3385 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
3386 return -EINVAL;
3387
3388 switch (speed) {
3389 case PCIE_SPEED_2_5GT:
3390 *pci_bw = 2500 * width;
3391 break;
3392 case PCIE_SPEED_5_0GT:
3393 *pci_bw = 5000 * width;
3394 break;
3395 case PCIE_SPEED_8_0GT:
3396 *pci_bw = 8000 * width;
3397 break;
3398 default:
3399 return -EINVAL;
3400 }
3401
3402 return 0;
3403}
3404
3405static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
3406{
3407 return (link_speed && pci_bw &&
3408 (pci_bw < 40000) && (pci_bw < link_speed));
3409}
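/* Side note: the heuristic enables CQE compression by default only when
 * the computed PCIe bandwidth is below 40 Gb/s and below the link speed,
 * i.e. when PCIe, not the wire, is the likely bottleneck.
 */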
3410
9908aa29
TT
3411void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
3412{
3413 params->rx_cq_period_mode = cq_period_mode;
3414
3415 params->rx_cq_moderation.pkts =
3416 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3417 params->rx_cq_moderation.usec =
3418 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3419
3420 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
3421 params->rx_cq_moderation.usec =
3422 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
3423}
3424
cff92d7c
HHZ
3425static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
3426 u8 *min_inline_mode)
3427{
3428 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
34e4e990 3429 case MLX5_CAP_INLINE_MODE_L2:
cff92d7c
HHZ
3430 *min_inline_mode = MLX5_INLINE_MODE_L2;
3431 break;
34e4e990
RD
3432 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3433 mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
cff92d7c 3434 break;
34e4e990 3435 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
cff92d7c
HHZ
3436 *min_inline_mode = MLX5_INLINE_MODE_NONE;
3437 break;
3438 }
3439}
3440
2b029556
SM
3441u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
3442{
3443 int i;
3444
3445 /* The supported periods are organized in ascending order */
3446 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
3447 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
3448 break;
3449
3450 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
3451}
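/* Side note: the loop picks the smallest supported LRO timer period that
 * is >= the wanted timeout, falling back to the largest supported period
 * when none qualifies.
 */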
3452
6bfd390b
HHZ
3453static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3454 struct net_device *netdev,
127ea380
HHZ
3455 const struct mlx5e_profile *profile,
3456 void *ppriv)
f62b8bb8
AV
3457{
3458 struct mlx5e_priv *priv = netdev_priv(netdev);
b797a684
SM
3459 u32 link_speed = 0;
3460 u32 pci_bw = 0;
cb3c7fd4
GR
3461 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3462 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
3463 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
f62b8bb8 3464
2fc4bfb7
SM
3465 priv->mdev = mdev;
3466 priv->netdev = netdev;
3467 priv->params.num_channels = profile->max_nch(mdev);
3468 priv->profile = profile;
3469 priv->ppriv = ppriv;
3470
2b029556
SM
3471 priv->params.lro_timeout =
3472 mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
3473
2fc4bfb7 3474 priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
461017cb 3475
b797a684 3476 /* set CQE compression */
9bcc8606 3477 priv->params.rx_cqe_compress_def = false;
b797a684
SM
3478 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
3479 MLX5_CAP_GEN(mdev, vport_group_manager)) {
3480 mlx5e_get_max_linkspeed(mdev, &link_speed);
3481 mlx5e_get_pci_bw(mdev, &pci_bw);
3482 mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
3483 link_speed, pci_bw);
9bcc8606 3484 priv->params.rx_cqe_compress_def =
b797a684
SM
3485 cqe_compress_heuristic(link_speed, pci_bw);
3486 }
b797a684 3487
2fc4bfb7
SM
3488 mlx5e_set_rq_priv_params(priv);
3489 if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
461017cb 3490 priv->params.lro_en = true;
9908aa29 3491
cb3c7fd4
GR
3492 priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
3493 mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
9908aa29
TT
3494
3495 priv->params.tx_cq_moderation.usec =
f62b8bb8 3496 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
9908aa29 3497 priv->params.tx_cq_moderation.pkts =
f62b8bb8 3498 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
58d52291 3499 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
cff92d7c 3500 mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
f62b8bb8 3501 priv->params.num_tc = 1;
2be6967c 3502 priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
f62b8bb8 3503
57afead5
AS
3504 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
3505 sizeof(priv->params.toeplitz_hash_key));
3506
d8c9660d 3507 mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
6bfd390b 3508 MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
2d75b2bc 3509
e4b85508
SM
3510 priv->params.lro_wqe_sz =
3511 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
3512 /* Extra room needed for build_skb */
3513 MLX5_RX_HEADROOM -
3514 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
f62b8bb8 3515
9908aa29 3516 /* Initialize pflags */
59ece1c9
SD
3517 MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
3518 priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
9bcc8606 3519 MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
9908aa29 3520
f62b8bb8
AV
3521 mutex_init(&priv->state_lock);
3522
3523 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3524 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3947ca18 3525 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
f62b8bb8
AV
3526 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3527}
3528
3529static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
3530{
3531 struct mlx5e_priv *priv = netdev_priv(netdev);
3532
e1d7d349 3533 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
108805fc
SM
3534 if (is_zero_ether_addr(netdev->dev_addr) &&
3535 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
3536 eth_hw_addr_random(netdev);
3537 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
3538 }
f62b8bb8
AV
3539}
3540
cb67b832
HHZ
3541static const struct switchdev_ops mlx5e_switchdev_ops = {
3542 .switchdev_port_attr_get = mlx5e_attr_get,
3543};
3544
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}

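/* The queue counter is a firmware-allocated counter set used by the RX
 * statistics code elsewhere in the driver (e.g. for out-of-buffer drop
 * counts).  Allocation failure is non-fatal: the driver simply runs with
 * q_counter == 0 and skips the related queries.
 */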
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

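/* Profile .init/.cleanup hooks for the plain NIC netdev: build the priv
 * area and the netdev itself, and initialize the VXLAN port table.  The
 * cleanup hook also drops the reference on any XDP program still
 * attached at teardown.
 */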
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_vxlan_cleanup(priv);

	if (priv->xdp_prog)
		bpf_prog_put(priv->xdp_prog);
}

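/* RX bring-up order matters: the RQTs (indirection tables) must exist
 * before the TIRs that reference them, and both before flow steering and
 * TC offload state.  The error labels below unwind in exact reverse
 * order of creation.
 */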
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;
	int i;

	err = mlx5e_create_indirect_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		goto err_destroy_indirect_rqts;
	}

	err = mlx5e_create_indirect_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_indirect_tirs;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->profile->max_nch(mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

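/* TX bring-up: create the TISes (transport interface send contexts,
 * one per traffic class) and, when compiled in, the DCB(NL) state.
 */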
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}

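/* .enable/.disable bracket the period in which the netdev may carry
 * traffic.  enable also registers the uplink (vport 0) eswitch
 * representor and, when the netdev is already registered, resyncs VXLAN
 * offload state under rtnl and kicks the RX-mode work.
 */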
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = FDB_UPLINK_VPORT;
		rep.netdev = netdev;
		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
	}

	if (netdev->reg_state != NETREG_REGISTERED)
		return;

	/* Device already registered: sync netdev system state */
	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	queue_work(priv->wq, &priv->set_rx_mode_work);
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init = mlx5e_nic_init,
	.cleanup = mlx5e_nic_cleanup,
	.init_rx = mlx5e_init_nic_rx,
	.cleanup_rx = mlx5e_cleanup_nic_rx,
	.init_tx = mlx5e_init_nic_tx,
	.cleanup_tx = mlx5e_cleanup_nic_tx,
	.enable = mlx5e_nic_enable,
	.disable = mlx5e_nic_disable,
	.update_stats = mlx5e_update_stats,
	.max_nch = mlx5e_get_max_num_channels,
	.max_tc = MLX5E_MAX_NUM_TC,
};

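/* The profile above is what mlx5e_create_netdev()/mlx5e_attach_netdev()
 * below consume.  As an illustrative sketch only (not code in this
 * driver), a hypothetical alternate user would supply its own callbacks
 * and pass the profile in:
 *
 *	static const struct mlx5e_profile my_profile = {
 *		.init       = my_init,
 *		.cleanup    = my_cleanup,
 *		.init_rx    = my_init_rx,
 *		.cleanup_rx = my_cleanup_rx,
 *		.init_tx    = my_init_tx,
 *		.cleanup_tx = my_cleanup_tx,
 *		.max_nch    = my_max_nch,
 *		.max_tc     = 1,
 *	};
 *
 *	netdev = mlx5e_create_netdev(mdev, &my_profile, NULL);
 *
 * This mirrors how the eswitch representor netdevs are built elsewhere
 * in the driver.
 */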
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}

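/* Attach order: TX contexts first, then the drop RQ (a fallback RQ that
 * discards traffic while the real RQs are down), then RX contexts, MTU
 * limits, the optional profile->enable hook, and finally reopening the
 * netdev under rtnl if it was running.  MLX5E_HW2SW_MTU (defined
 * elsewhere in the driver) converts the firmware port MTU, which counts
 * Ethernet framing overhead, into the netdev MTU, which does not.
 */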
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_priv *priv;
	u16 max_mtu;
	int err;

	priv = netdev_priv(netdev);
	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);

	mlx5e_set_dev_port_mtu(netdev);

	if (profile->enable)
		profile->enable(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}

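/* Register an eswitch representor for every VF vport; vport 0, the
 * uplink, is handled in mlx5e_nic_enable() above.  Each rep inherits
 * the PF MAC address as its hw_id.
 */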
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_close(netdev);
	netif_device_detach(netdev);
	rtnl_unlock();

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

/* mlx5e_attach and mlx5e_detach scope should be limited to creating and
 * destroying hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(mdev, netdev);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(mdev, netdev);
	mlx5e_destroy_mdev_resources(mdev);
}

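/* .add is the mlx5_interface probe entry point: verify HCA caps,
 * register the vport representors, allocate and attach the netdev, and
 * only then register it with the network stack.  Each error label
 * unwinds exactly the steps that succeeded before it.
 */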
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	void *ppriv = NULL;
	void *priv;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	mlx5e_register_vport_rep(mdev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(mdev, priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	return NULL;
}

void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_priv *priv = vpriv;
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(mdev, priv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

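/* Registration with the mlx5 core driver.  add/remove follow full
 * probe/teardown of a function, while attach/detach are the lightweight
 * pair that recreates only the hardware contexts (see the comment above
 * mlx5e_attach()), leaving the netdev itself registered.
 */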
static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_add,
	.remove = mlx5e_remove,
	.attach = mlx5e_attach,
	.detach = mlx5e_detach,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}