/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
#include <net/xdp_sock.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/reporter.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"

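/* Striding RQ (multi-packet WQE) relies on UMR: the driver posts inline
 * UMR WQEs to map each MPWQE's pages, so the device must expose the
 * striding_rq, umr_ptr_rlky and reg_umr_sq capabilities, and the inline
 * UMR WQE must fit within the reported maximum SQ WQE size.
 */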
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
	bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;

	if (!striding_rq_umr)
		return false;
	if (!inline_umr) {
		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
			       (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
		return false;
	}
	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (MLX5_IPSEC_DEV(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}
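
/* Example: where mlx5e_striding_rq_possible() holds and the
 * MLX5E_PFLAG_RX_STRIDING_RQ private flag is set, rq_wq_type resolves to
 * MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ; in all other cases the legacy
 * MLX5_WQ_TYPE_CYCLIC work queue is used.
 */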

void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats)
			mlx5e_stats_grps[i].update_stats(priv);
}

void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	int i;

	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats_mask &
		    MLX5E_NDO_UPDATE_STATS)
			mlx5e_stats_grps[i].update_stats(priv);
}

static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);
}

void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
	if (!priv->profile->update_stats)
		return;

	if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
		return;

	queue_work(priv->wq, &priv->update_stats_work);
}

static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
	struct mlx5_eqe *eqe = data;

	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	priv->events_nb.notifier_call = async_event;
	mlx5_notifier_register(priv->mdev, &priv->events_nb);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
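
/* Port up/down events arrive as MLX5_EVENT_TYPE_PORT_CHANGE EQEs; the
 * carrier update is deferred to the driver workqueue because
 * mlx5_query_vport_state() issues a (sleeping) firmware command that must
 * not run from the notifier's context.
 */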

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

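/* The control/UMR-control segments above are built once per RQ (see
 * mlx5e_rq_alloc_mpwqe_info() below); at run time the RX path only fills
 * in the per-MPWQE translation entries before reposting the UMR WQE on
 * the ICOSQ.
 */
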
static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
	default:
		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
	}
}

static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.cur_sz;
	default:
		return rq->wqe.wq.cur_sz;
	}
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
						  sizeof(*rq->mpwqe.info)),
				       GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		return -ENOMEM;

	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);

	return 0;
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}

static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
}

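/* Illustrative arithmetic: with 4 KiB pages, WQE index i starts at byte
 * offset (i << MLX5E_LOG_ALIGNED_MPWQE_PPW) * 4096 within the UMR-mapped
 * region, i.e. each MPWQE owns an aligned, power-of-two run of pages.
 */
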
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
	struct mlx5e_wqe_frag_info next_frag, *prev;
	int i;

	next_frag.di = &rq->wqe.di[0];
	next_frag.offset = 0;
	prev = NULL;

	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
		struct mlx5e_wqe_frag_info *frag =
			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
		int f;

		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
				next_frag.di++;
				next_frag.offset = 0;
				if (prev)
					prev->last_in_page = true;
			}
			*frag = next_frag;

			/* prepare next */
			next_frag.offset += frag_info[f].frag_stride;
			prev = frag;
		}
	}

	if (prev)
		prev->last_in_page = true;
}

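/* Illustrative example (assumed sizes): with num_frags = 3 and
 * frag_stride = 1024 on 4 KiB pages, each WQE consumes 3 KiB, so pages are
 * shared across consecutive WQEs. Whenever the next fragment would cross a
 * page boundary, the partition advances to the next DMA info entry (di)
 * and marks the previous fragment last_in_page, which tells the RX path
 * when a page can be released.
 */
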
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
			      int wq_sz, int cpu)
{
	int len = wq_sz << rq->wqe.info.log_num_frags;

	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!rq->wqe.di)
		return -ENOMEM;

	mlx5e_init_frags_partition(rq);

	return 0;
}

static void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
	kvfree(rq->wqe.di);
}

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct xdp_umem *umem,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct page_pool_params pp_params = { 0 };
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 num_xsk_frames = 0;
	u32 rq_xdp_ix;
	u32 pool_size;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;
	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->xdpsq   = &c->rq_xdpsq;
	rq->umem    = umem;

	if (rq->umem)
		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
	else
		rq->stats = &c->priv->channel_stats[c->ix].rq;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq_xdp_ix = rq->ix;
	if (xsk)
		rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
	if (err < 0)
		goto err_rq_wq_destroy;

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
	rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
	pool_size = 1 << params->log_rq_mtu_frames;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
					&rq->wq_ctrl);
		if (err)
			return err;

		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz <<
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);

		pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
			mlx5e_mpwqe_get_log_rq_size(params, xsk);

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
				mlx5e_skb_from_cqe_mpwrq_linear :
				mlx5e_skb_from_cqe_mpwrq_nonlinear;

		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
		rq->mpwqe.num_strides =
			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_free;
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
					 &rq->wq_ctrl);
		if (err)
			return err;

		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);

		if (xsk)
			num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;

		rq->wqe.info = rqp->frags_info;
		rq->wqe.frags =
			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
						 (wq_sz << rq->wqe.info.log_num_frags)),
				      GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frags) {
			err = -ENOMEM;
			goto err_free;
		}

		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
		if (err)
			goto err_free;

		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_free;
		}

		rq->wqe.skb_from_cqe = xsk ?
			mlx5e_xsk_skb_from_cqe_linear :
			mlx5e_rx_is_linear_skb(params, NULL) ?
				mlx5e_skb_from_cqe_linear :
				mlx5e_skb_from_cqe_nonlinear;
		rq->mkey_be = c->mkey_be;
	}

	if (xsk) {
		err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
		if (unlikely(err)) {
			mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
				      num_xsk_frames);
			goto err_free;
		}

		rq->zca.free = mlx5e_xsk_zca_free;
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_ZERO_COPY,
						 &rq->zca);
	} else {
		/* Create a page_pool and register it with rxq */
		pp_params.order     = 0;
		pp_params.flags     = 0; /* no internal DMA mapping in page_pool */
		pp_params.pool_size = pool_size;
		pp_params.nid       = cpu_to_node(c->cpu);
		pp_params.dev       = c->pdev;
		pp_params.dma_dir   = rq->buff.map_dir;

		/* page_pool can be used even when there is no rq->xdp_prog;
		 * since page_pool does not handle DMA mapping here, there is
		 * no required state to clear. page_pool also gracefully
		 * handles elevated refcounts.
		 */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			goto err_free;
		}
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rq->page_pool);
	}
	if (err)
		goto err_free;

	for (i = 0; i < wq_sz; i++) {
		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			struct mlx5e_rx_wqe_ll *wqe =
				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
			u32 byte_count =
				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
			wqe->data[0].byte_count = cpu_to_be32(byte_count);
			wqe->data[0].lkey = rq->mkey_be;
		} else {
			struct mlx5e_rx_wqe_cyc *wqe =
				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
			int f;

			for (f = 0; f < rq->wqe.info.num_frags; f++) {
				u32 frag_size = rq->wqe.info.arr[f].frag_size |
					MLX5_HW_START_PADDING;

				wqe->data[f].byte_count = cpu_to_be32(frag_size);
				wqe->data[f].lkey = rq->mkey_be;
			}
			/* pad with an empty data segment if num_frags is not
			 * a power of two
			 */
			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
				wqe->data[f].byte_count = 0;
				wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
				wqe->data[f].addr = 0;
			}
		}
	}

	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);

	switch (params->rx_cq_moderation.cq_period_mode) {
	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		break;
	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
	default:
		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_free:
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kvfree(rq->mpwqe.info);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		kvfree(rq->wqe.frags);
		mlx5e_free_di_list(rq);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		/* With AF_XDP, page_cache is not used, so this loop is not
		 * entered, and it's safe to call mlx5e_page_release_dynamic
		 * directly.
		 */
		mlx5e_page_release_dynamic(rq, dma_info, false);
	}

	xdp_rxq_info_unreg(&rq->xdp_rxq);
	page_pool_destroy(rq->page_pool);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}

int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
	struct mlx5e_channel *c = rq->channel;

	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

	do {
		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	__be16 wqe_ix_be;
	u16 wqe_ix;

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
		u16 head = wq->head;
		int i;

		/* Outstanding UMR WQEs (in progress) start at wq->head */
		for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
			rq->dealloc_wqe(rq, head);
			head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
		}

		while (!mlx5_wq_ll_is_empty(wq)) {
			struct mlx5e_rx_wqe_ll *wqe;

			wqe_ix_be = *wq->tail_next;
			wqe_ix    = be16_to_cpu(wqe_ix_be);
			wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_ll_pop(wq, wqe_ix_be,
				       &wqe->next.next_wqe_index);
		}
	} else {
		struct mlx5_wq_cyc *wq = &rq->wqe.wq;

		while (!mlx5_wq_cyc_is_empty(wq)) {
			wqe_ix = mlx5_wq_cyc_get_tail(wq);
			rq->dealloc_wqe(rq, wqe_ix);
			mlx5_wq_cyc_pop(wq);
		}
	}
}

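/* RQ lifecycle: mlx5e_alloc_rq() builds the SW state, mlx5e_create_rq()
 * creates the HW object in RST state, and mlx5e_modify_rq_state() moves it
 * RST -> RDY before traffic can flow; mlx5e_close_rq() unwinds the same
 * steps in reverse.
 */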
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
		  struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
		  struct xdp_umem *umem, struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_trigger_irq(&rq->channel->icosq);
}

void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->dim.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kvfree(sq->db.xdpi_fifo.xi);
	kvfree(sq->db.wqe_info);
}

static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
				      GFP_KERNEL, numa);
	if (!xdpi_fifo->xi)
		return -ENOMEM;

	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
	xdpi_fifo->mask = dsegs_per_wq - 1;

	return 0;
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;

	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
					GFP_KERNEL, numa);
	if (!sq->db.wqe_info)
		return -ENOMEM;

	err = mlx5e_alloc_xdpsq_fifo(sq, numa);
	if (err) {
		mlx5e_free_xdpsq_db(sq);
		return err;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct xdp_umem *umem,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq,
			     bool is_redirect)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->umem      = umem;

	sq->stats = sq->umem ?
		&c->priv->channel_stats[c->ix].xsksq :
		is_redirect ?
			&c->priv->channel_stats[c->ix].xdpsq :
			&c->priv->channel_stats[c->ix].rq_xdpsq;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kvfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
						  sizeof(*sq->db.ico_wqe)),
				       GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->channel = c;
	sq->uar_map = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kvfree(sq->db.wqe_info);
	kvfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
						   sizeof(*sq->db.dma_fifo)),
					GFP_KERNEL, numa);
	sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
						   sizeof(*sq->db.wqe_info)),
					GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

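/* Note on stop_room below: the TX path stops the netdev queue while at
 * least this many WQEBBs remain free, so a maximally-sized packet (plus
 * TLS resync data when TLS offload is active, hence MLX5E_SQ_TLS_ROOM)
 * always fits in the remaining space.
 */
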
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq,
			     int tc)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->ch_ix     = c->ix;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
	sq->stop_room = MLX5E_SQ_STOP_ROOM;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
		sq->stop_room += MLX5E_SQ_TLS_ROOM;
	}

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl *wq_ctrl;
	u32                  cqn;
	u32                  tisn;
	u8                   tis_lst_sz;
	u8                   min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc, sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);

	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
		    struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

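/* An SQ is created in RST state and must be moved RST -> RDY before it can
 * transmit; mlx5e_create_sq_rdy() below wraps the two steps. A minimal
 * sketch of the transition it performs:
 *
 *	struct mlx5e_modify_sq_param msp = {0};
 *
 *	msp.curr_state = MLX5_SQC_STATE_RST;
 *	msp.next_state = MLX5_SQC_STATE_RDY;
 *	err = mlx5e_modify_sq(mdev, sqn, &msp);
 */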
static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq,
			    int tc)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	if (params->tx_dim_enabled)
		sq->state |= BIT(MLX5E_SQ_STATE_AM);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_wq_cyc *wq = &sq->wq;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	mlx5e_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[pi].skb = NULL;
		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

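/* The NOP posted at the end of mlx5e_deactivate_txqsq() rings the doorbell
 * one last time, so any WQEs that were written but never submitted by a
 * doorbell still reach the HW and the SQ can drain before it is destroyed.
 */
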
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_rate_limit rl = {0};

	cancel_work_sync(&sq->dim.work);
	cancel_work_sync(&sq->recover_work);
	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		mlx5_rl_remove_rate(mdev, &rl);
	}
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
	struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
					      recover_work);

	mlx5e_tx_reporter_err_cqe(sq);
}

int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
		     struct mlx5e_sq_param *param, struct xdp_umem *umem,
		     struct mlx5e_xdpsq *sq, bool is_redirect)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	mlx5e_set_xmit_fp(sq, param->is_mpw);

	if (!param->is_mpw) {
		unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
		unsigned int inline_hdr_sz = 0;
		int i;

		if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
			inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
			ds_cnt++;
		}

		/* Pre initialize fixed WQE fields */
		for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
			struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
			struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
			struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
			struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
			struct mlx5_wqe_data_seg *dseg;

			cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

			dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
			dseg->lkey = sq->mkey_be;

			wi->num_wqebbs = 1;
			wi->num_pkts   = 1;
		}
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}

static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}
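
/* op_own = 0xf1 stamps every CQE with an invalid opcode and the ownership
 * bit set, so the CQ poller sees all entries as HW-owned until the device
 * overwrites them with real completions.
 */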

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
		  struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * max_nch;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc], tc);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

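/* HW TX rate limiting goes through a shared rate table on the device:
 * mlx5_rl_add_rate() returns the table index for a given rate (adding the
 * rate on first use), and the SQ context is then modified in place
 * (RDY -> RDY with rl_update set) to point at that packet-pacing index.
 */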
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	struct mlx5_rate_limit rl = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit) {
		rl.rate = sq->rate_limit;
		/* remove the current rl index to free space for the next ones */
		mlx5_rl_remove_rate(mdev, &rl);
	}

	sq->rate_limit = 0;

	if (rate) {
		rl.rate = rate;
		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index = rl_index;
	msp.rl_update = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, &rl);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether the rate is in the valid range; 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

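/* Build the XPS cpumask for this channel: collect the first CPU of every
 * completion IRQ that maps to this channel index (IRQs are distributed
 * round-robin over the channels, hence the num_channels stride).
 */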
static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
				   struct mlx5e_params *params)
{
	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
	int irq;

	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
		return -ENOMEM;

	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));

		cpumask_set_cpu(cpu, c->xps_cpumask);
	}

	return 0;
}

static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
{
	free_cpumask_var(c->xps_cpumask);
}

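/* Open all per-channel queues: CQs first (ICO, TX, XDP TX, RX, XDP RX),
 * then NAPI is enabled, then the SQs and the RQ that post to those CQs.
 * The error path unwinds in exact reverse order.
 */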
static int mlx5e_open_queues(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	struct dim_cq_moder icocq_moder = {0, 0};
	int err;

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		return err;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_xdp_tx_cqs;

	/* XDP SQ CQ params are the same as the normal TXQ SQ CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq_xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	if (c->xdp) {
		err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
				       &c->rq_xdpsq, false);
		if (err)
			goto err_close_sqs;
	}

	err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
	if (err)
		goto err_close_rq;

	return 0;

err_close_rq:
	mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);

	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_xdp_tx_cqs:
	mlx5e_close_cq(&c->xdpsq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

	return err;
}

static void mlx5e_close_queues(struct mlx5e_channel *c)
{
	mlx5e_close_xdpsq(&c->xdpsq);
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq_xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq_xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_cq(&c->xdpsq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
}

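/* Allocate one channel on the NUMA node of its completion vector's CPU,
 * open its queues, and optionally attach an AF_XDP UMEM to it.
 */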
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct xdp_umem *umem,
			      struct mlx5e_channel **cp)
{
	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
	struct net_device *netdev = priv->netdev;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	if (err)
		return err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv = priv;
	c->mdev = priv->mdev;
	c->tstamp = &priv->tstamp;
	c->ix = ix;
	c->cpu = cpu;
	c->pdev = priv->mdev->device;
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc = params->num_tc;
	c->xdp = !!params->xdp_prog;
	c->stats = &priv->channel_stats[ix].ch;
	c->irq_desc = irq_to_desc(irq);

	err = mlx5e_alloc_xps_cpumask(c, params);
	if (err)
		goto err_free_channel;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_queues(c, params, cparam);
	if (unlikely(err))
		goto err_napi_del;

	if (umem) {
		mlx5e_build_xsk_param(umem, &xsk);
		err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
		if (unlikely(err))
			goto err_close_queues;
	}

	*cp = c;

	return 0;

err_close_queues:
	mlx5e_close_queues(c);

err_napi_del:
	netif_napi_del(&c->napi);
	mlx5e_free_xps_cpumask(c);

err_free_channel:
	kvfree(c);

	return err;
}

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_activate_xsk(c);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_deactivate_xsk(c);

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
		mlx5e_close_xsk(c);
	mlx5e_close_queues(c);
	netif_napi_del(&c->napi);
	mlx5e_free_xps_cpumask(c);

	kvfree(c);
}

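/* Compute the RX scatter layout for cyclic (legacy) RQs. Linear SKBs use a
 * single fragment whose stride is rounded up to a power of two; otherwise
 * the MTU-derived byte count is split into up to MLX5E_MAX_RX_FRAGS
 * fragments of at most DEFAULT_FRAG_SIZE each (or PAGE_SIZE for very large
 * MTUs). E.g. with the default 2048-byte frag size, byte_count = 4500
 * splits into three fragments of 2048 + 2048 + 404 bytes.
 */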
#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (MLX5_IPSEC_DEV(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;
#endif

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}

static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

void mlx5e_build_rq_param(struct mlx5e_priv *priv,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk,
			  struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mdev->device);
}

static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
				      struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);

	param->wq.buf_numa_node = dev_to_node(mdev->device);
}

void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
		    !!MLX5_IPSEC_DEV(priv->mdev);
	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk,
			     struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
			      u8 log_wq_size,
			      struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
			     u8 log_wq_size,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return order_base_2(MLX5E_UMR_WQEBBS) +
			mlx5e_get_rq_log_wq_sz(rqp->rqc);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	}
}

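/* Fill one mlx5e_channel_param with the RQ/SQ/CQ configurations shared by
 * all channels. The ICOSQ is sized from the RQ so that a UMR WQE can be
 * posted per RQ entry; the NULL xsk arguments mean these are the regular,
 * non-XSK queue parameters.
 */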
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz;

	mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);

	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}

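/* Open all channels for the current params. On any failure the channels
 * opened so far are closed and chs->num is reset to 0, so a failed open
 * leaves *chs in a consistent, fully-closed state.
 */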
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		struct xdp_umem *umem = NULL;

		if (chs->params.xdp_prog)
			umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);

		err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	if (!IS_ERR_OR_NULL(priv->tx_reporter))
		devlink_health_reporter_state_update(priv->tx_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);

	kvfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kvfree(cparam);
	chs->num = 0;
	return err;
}

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;

		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);

		/* Don't wait on the XSK RQ, because the newer xdpsock sample
		 * doesn't provide any Fill Ring entries at the setup stage.
		 */
	}

	return err ? -ETIMEDOUT : 0;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}

static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int err;
	int ix;

	for (ix = 0; ix < max_nch; ix++) {
		err = mlx5e_create_rqt(priv, 1 /* size */, &tirs[ix].rqt);
		if (unlikely(err))
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &tirs[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i;

	for (i = 0; i < max_nch; i++)
		mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

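/* Reverse the low 'size' bits of 'a'. Used to spread the RSS indirection
 * table when the XOR8 hash function is selected, e.g. for size = 3:
 * mlx5e_bits_invert(0b001, 3) == 0b100, mlx5e_bits_invert(0b110, 3) == 0b011.
 */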
int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->rss_params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

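/* Repoint the indirection (RSS) RQT and every enabled direct RQT at either
 * the live channels' RQs or the drop RQ, depending on rrp. Direct entries
 * for channels beyond the currently open set fall back to the drop RQ.
 */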
static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc = priv->rss_params.hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}

static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
	[MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				.l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
				.rx_hash_fields = MLX5_HASH_IP_L4PORTS,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				     .l4_prot_type = 0,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				     .l4_prot_type = 0,
				     .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
				      .l4_prot_type = 0,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
				      .l4_prot_type = 0,
				      .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
	},
	[MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
			    .l4_prot_type = 0,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
	[MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
			    .l4_prot_type = 0,
			    .rx_hash_fields = MLX5_HASH_IP,
	},
};

struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
{
	return tirc_default_config[tt];
}

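/* Program LRO in the TIR context. The max IP payload field is written in
 * 256-byte units (hence the >> 8), with room reserved for a rough
 * worst-case L2+L3 header.
 */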
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}

void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
				    const struct mlx5e_tirc_config *ttconfig,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
	if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, rss_params->toeplitz_hash_key, len);
	}
	MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		 ttconfig->l3_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		 ttconfig->l4_prot_type);
	MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		 ttconfig->rx_hash_fields);
}

static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
					enum mlx5e_traffic_types tt,
					u32 rx_hash_fields)
{
	*ttconfig = tirc_default_config[tt];
	ttconfig->rx_hash_fields = rx_hash_fields;
}

void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	struct mlx5_core_dev *mdev = priv->mdev;
	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
	struct mlx5e_tirc_config ttconfig;
	int tt;

	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(tirc, 0, ctxlen);
		mlx5e_update_rx_hash_fields(&ttconfig, tt,
					    rss->rx_hash_fields[tt]);
		mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
				     inlen);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params, u16 mtu)
{
	u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params, u16 *mtu)
{
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}

int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
	if (err)
		return err;

	mlx5e_query_mtu(mdev, params, &mtu);
	if (mtu != params->sw_mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, params->sw_mtu);

	params->sw_mtu = mtu;
	return 0;
}

void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
				ETH_MAX_MTU);
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
	int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i, tc;

	for (i = 0; i < max_nch; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * max_nch;
}

static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, num_rxqs);

	mlx5e_build_tx2sq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	mlx5e_xdp_tx_enable(priv);
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);

	mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels);

	mlx5e_redirect_rqts_to_drop(priv);

	if (mlx5e_is_vport_rep(priv))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_xdp_tx_disable(priv);
	mlx5e_deactivate_channels(&priv->channels);
}

static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				       struct mlx5e_channels *new_chs,
				       mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	priv->profile->update_rx(priv);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}

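/* Safely replace the active channel set: the new channels are fully opened
 * before the old ones are deactivated and closed, so a failure here leaves
 * the current configuration untouched.
 */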
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
			       struct mlx5e_channels *new_chs,
			       mlx5e_fp_hw_modify hw_modify)
{
	int err;

	err = mlx5e_open_channels(priv, new_chs);
	if (err)
		return err;

	mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
	return 0;
}

int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channels new_channels = {};

	new_channels.params = priv->channels.params;
	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
}

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool is_xdp = priv->channels.params.xdp_prog;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);
	if (is_xdp)
		mlx5e_xdp_set_open(priv);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	priv->profile->update_rx(priv);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	mlx5e_queue_update_stats(priv);
	return 0;

err_clear_state_opened_flag:
	if (is_xdp)
		mlx5e_xdp_set_closed(priv);
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (priv->channels.params.xdp_prog)
		mlx5e_xdp_set_closed(priv);
	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
				 &rq->wq_ctrl);
	if (err)
		return err;

	/* Mark as unused given "Drop-RQ" packets never reach XDP */
	xdp_rxq_info_unused(&rq->xdp_rxq);

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(mdev->device);
	param->wq.db_numa_node = dev_to_node(mdev->device);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}

int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
		       struct mlx5e_rq *drop_rq)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(priv, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}

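/* Create a TIS (transport interface send) object. The caller pre-fills the
 * context in 'in' (e.g. prio); TLS-enabled TISes also get the PD, and
 * strict LAG TX port affinity is set when this device is the LACP owner.
 */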
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (MLX5_GET(tisc, tisc, tls_en))
		MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
		void *tisc;

		tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

		MLX5_SET(tisc, tisc, prio, tc << 1);

		err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	mlx5e_tx_reporter_destroy(priv);
	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
					     u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en,
		 priv->channels.params.tunneled_offload_en);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
	mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
				       &tirc_default_config[tt], tirc, true);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err = 0;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < max_nch; ix++) {
		memset(in, 0, inlen);
		tir = &tirs[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (unlikely(err))
			goto err_destroy_ch_tirs;
	}

	goto out;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &tirs[ix]);

out:
	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
	const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
	int i;

	for (i = 0; i < max_nch; i++)
		mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
	if (err)
		goto out;

	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    new_channels.params.num_tc);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower,
				     int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
						 MLX5E_TC_NIC_OFFLOAD);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

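/* Fold the per-channel software counters (regular and XSK RQs, and all SQs
 * up to the highest TC ever opened) into one rtnl_link_stats64 snapshot.
 */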
b832d4fd 3498void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
9659e49a
SM
3499{
3500 int i;
3501
3502 for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
3503 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
db05815b 3504 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
9659e49a
SM
3505 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3506 int j;
3507
db05815b
MM
3508 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
3509 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
9659e49a
SM
3510
3511 for (j = 0; j < priv->max_opened_tc; j++) {
3512 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3513
3514 s->tx_packets += sq_stats->packets;
3515 s->tx_bytes += sq_stats->bytes;
3516 s->tx_dropped += sq_stats->dropped;
3517 }
3518 }
3519}
3520
void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (!mlx5e_monitor_counter_supported(priv)) {
		/* update HW stats in background for next time */
		mlx5e_queue_update_stats(priv);
	}

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		mlx5e_fold_sw_stats64(priv, stats);
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *old_params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	if (enable && priv->xsk.refcnt) {
		netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
			    priv->xsk.refcnt);
		err = -EINVAL;
		goto out;
	}

	old_params = &priv->channels.params;
	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
		err = -EINVAL;
		goto out;
	}

	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *old_params;
	new_channels.params.lro_en = enable;

	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
			reset = false;
	}

	if (!reset) {
		*old_params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}
#endif

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(features, feature, enable);
	return 0;
}

int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t oper_features = netdev->features;
	int err = 0;

#define MLX5E_HANDLE_FEATURE(feature, handler) \
	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)

	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
	err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

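/* Note on the handler chain above: each handler toggles one feature and,
 * on success, records the new state in oper_features via
 * MLX5E_SET_FEATURE(). On any failure, netdev->features is reset to
 * oper_features, so features that were already applied in hardware remain
 * visible to the stack while the failed request is rejected with -EINVAL.
 */
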
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_params *params;

	mutex_lock(&priv->state_lock);
	params = &priv->channels.params;
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!params->vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
		features &= ~NETIF_F_LRO;
		if (params->lro_en)
			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
	}

	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		features &= ~NETIF_F_RXHASH;
		if (netdev->features & NETIF_F_RXHASH)
			netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
	}

	mutex_unlock(&priv->state_lock);

	return features;
}

static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
				   struct mlx5e_channels *chs,
				   struct mlx5e_params *new_params,
				   struct mlx5_core_dev *mdev)
{
	u16 ix;

	for (ix = 0; ix < chs->params.num_channels; ix++) {
		struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
		struct mlx5e_xsk_param xsk;

		if (!umem)
			continue;

		mlx5e_build_xsk_param(umem, &xsk);

		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
			int max_mtu_frame, max_mtu_page, max_mtu;

			/* Two criteria must be met:
			 * 1. HW MTU + all headrooms <= XSK frame size.
			 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
			 */
			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
			max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
			max_mtu = min(max_mtu_frame, max_mtu_page);

			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
				   new_params->sw_mtu, ix, max_mtu);
			return false;
		}
	}

	return true;
}

int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     change_hw_mtu_cb set_mtu_cb)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	struct mlx5e_params *params;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	params = &priv->channels.params;

	reset = !params->lro_en;
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = *params;
	new_channels.params.sw_mtu = new_mtu;

	if (params->xdp_prog &&
	    !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
			   new_mtu, mlx5e_xdp_max_mtu(params, NULL));
		err = -EINVAL;
		goto out;
	}

	if (priv->xsk.refcnt &&
	    !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
				    &new_channels.params, priv->mdev)) {
		err = -EINVAL;
		goto out;
	}

	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
							      &new_channels.params,
							      NULL);
		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params, NULL);

		/* If XSK is active, XSK RQs are linear. */
		is_linear |= priv->xsk.refcnt;

		/* Always reset in linear mode - hw_mtu is used in data path. */
		reset = reset && (is_linear || (ppw_old != ppw_new));
	}

	if (!reset) {
		params->sw_mtu = new_mtu;
		if (set_mtu_cb)
			set_mtu_cb(priv);
		netdev->mtu = params->sw_mtu;
		goto out;
	}

	err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
	if (err)
		goto out;

	netdev->mtu = new_channels.params.sw_mtu;

out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

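/* Note: mlx5e_safe_switch_channels(), used above and throughout this file,
 * first builds the complete new set of channels and only then swaps and
 * closes the old ones, so a failed MTU (or other) reconfiguration leaves
 * the previous working configuration in place.
 */
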
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		netdev_warn(priv->netdev, "Disabling cqe compression");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	/* might need to fix some features */
	netdev_update_features(priv->netdev);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

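/* Note: enabling RX timestamping forces CQE compression off above because
 * a compressed CQE session carries shared metadata (including the
 * timestamp) for several completions, which would make per-packet RX
 * timestamps inaccurate.
 */
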
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
		      int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

int mlx5e_get_vf_config(struct net_device *dev,
			int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

int mlx5e_get_vf_stats(struct net_device *dev,
		       int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif

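/* Note: the "vf + 1" translation in the NDOs above reflects the e-switch
 * vport numbering, where vport 0 is the PF itself and VF n is vport n + 1.
 */
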
struct mlx5e_vxlan_work {
	struct work_struct work;
	struct mlx5e_priv *priv;
	u16 port;
};

static void mlx5e_vxlan_add_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);

	kfree(vxlan_work);
}

static void mlx5e_vxlan_del_work(struct work_struct *work)
{
	struct mlx5e_vxlan_work *vxlan_work =
		container_of(work, struct mlx5e_vxlan_work, work);
	struct mlx5e_priv *priv = vxlan_work->priv;
	u16 port = vxlan_work->port;

	mutex_lock(&priv->state_lock);
	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
	mutex_unlock(&priv->state_lock);
	kfree(vxlan_work);
}

static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
{
	struct mlx5e_vxlan_work *vxlan_work;

	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
	if (!vxlan_work)
		return;

	if (add)
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
	else
		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);

	vxlan_work->priv = priv;
	vxlan_work->port = port;
	queue_work(priv->wq, &vxlan_work->work);
}

void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}

void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
		return;

	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
}

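/* Note: .ndo_udp_tunnel_add/del may be invoked in atomic context, so the
 * handlers above only queue work (allocated with GFP_ATOMIC); the actual
 * firmware port add/delete runs later on priv->wq, where taking
 * state_lock and sleeping are allowed.
 */
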
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
			return features;

#if IS_ENABLED(CONFIG_GENEVE)
		/* Support Geneve offload for default UDP port */
		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
			return features;
#endif
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	bool report_failed = false;
	int err;
	int i;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue =
			netdev_get_tx_queue(priv->netdev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		if (mlx5e_tx_reporter_timeout(sq))
			report_failed = true;
	}

	if (!report_failed)
		goto unlock;

	err = mlx5e_safe_reopen_channels(priv);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}

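/* Note: .ndo_tx_timeout is called from the netdev watchdog in atomic
 * context, so the handler above only logs and queues work; the recovery
 * itself runs in mlx5e_tx_timeout_work(), where rtnl_lock and state_lock
 * may be taken and the channels may be reopened.
 */
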
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels new_channels = {};

	if (priv->channels.params.lro_en) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		return -EINVAL;
	}

	if (MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		return -EINVAL;
	}

	new_channels.params = priv->channels.params;
	new_channels.params.xdp_prog = prog;

	/* No XSK params: AF_XDP can't be enabled yet at the point of setting
	 * the XDP program.
	 */
	if (!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
			    new_channels.params.sw_mtu,
			    mlx5e_xdp_max_mtu(&new_channels.params, NULL));
		return -EINVAL;
	}

	return 0;
}

static int mlx5e_xdp_update_state(struct mlx5e_priv *priv)
{
	if (priv->channels.params.xdp_prog)
		mlx5e_xdp_set_open(priv);
	else
		mlx5e_xdp_set_closed(priv);

	return 0;
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	bool reset, was_opened;
	int err = 0;
	int i;

	mutex_lock(&priv->state_lock);

	if (prog) {
		err = mlx5e_xdp_allowed(priv, prog);
		if (err)
			goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	if (was_opened && reset) {
		struct mlx5e_channels new_channels = {};

		new_channels.params = priv->channels.params;
		new_channels.params.xdp_prog = prog;
		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
		old_prog = priv->channels.params.xdp_prog;

		err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_xdp_update_state);
		if (err)
			goto unlock;
	} else {
		/* exchange programs, extra prog reference we got from caller
		 * as long as we don't fail from this point onwards.
		 */
		old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (!was_opened || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];
		bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		if (xsk_open)
			clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);
		if (old_prog)
			bpf_prog_put(old_prog);

		if (xsk_open) {
			old_prog = xchg(&c->xskrq.xdp_prog, prog);
			if (old_prog)
				bpf_prog_put(old_prog);
		}

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		if (xsk_open)
			set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

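/* Note on the program-exchange loop above: clearing MLX5E_RQ_STATE_ENABLED
 * and calling napi_synchronize() guarantees no RX polling is in flight on
 * the channel while xchg() swaps rq->xdp_prog, so the datapath never
 * observes a half-installed program and refcounts stay balanced.
 */
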
static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		return 0;
	case XDP_SETUP_XSK_UMEM:
		return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
					    xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				struct net_device *dev, u32 filter_mask,
				int nlflags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 mode, setting;
	int err;

	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
	if (err)
		return err;
	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       mode,
				       0, 0, nlflags, filter_mask, NULL);
}

static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				u16 flags, struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct nlattr *attr, *br_spec;
	u16 mode = BRIDGE_MODE_UNDEF;
	u8 setting;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode > BRIDGE_MODE_VEPA)
			return -EINVAL;

		break;
	}

	if (mode == BRIDGE_MODE_UNDEF)
		return -EINVAL;

	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif

const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
	.ndo_xsk_async_xmit      = mlx5e_xsk_async_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
	.ndo_bridge_getlink      = mlx5e_bridge_getlink,

	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
#endif
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

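/* Illustrative example (hypothetical values): with len = 8 and
 * num_channels = 3, the table above becomes {0, 1, 2, 0, 1, 2, 0, 1},
 * spreading RSS buckets round-robin across the active channels.
 */
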
static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

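/* Illustrative example (hypothetical numbers): a 100 Gb/s port behind a
 * PCIe slot providing ~40 Gb/s satisfies 100 > 2 * 40, so the heuristic
 * returns true and the driver leans toward PCIe-bandwidth-saving defaults
 * such as CQE compression (see mlx5e_build_nic_params() below).
 */
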
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}

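/* Illustrative example (hypothetical capability values): if the device
 * reports supported periods {16, 32, 64, 1024} usec and the wanted
 * timeout is 50, the loop above selects 64, the first supported period
 * that is >= the request, and falls back to the largest entry when the
 * request exceeds them all.
 */
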
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels)
{
	enum mlx5e_traffic_types tt;

	rss_params->hfunc = ETH_RSS_HASH_TOP;
	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
			    sizeof(rss_params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		rss_params->rx_hash_fields[tt] =
			tirc_default_config[tt].rx_hash_fields;
}

void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_xsk *xsk,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu)
{
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = max_channels;
	params->num_tc = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* XDP SQ */
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
		/* No XSK params: checking the availability of striding RQ in general. */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			params->lro_en = !slow_pci_heuristic(mdev);
	}
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	mlx5e_build_rss_params(rss_params, params->num_channels);
	params->tunneled_offload_en =
		mlx5e_tunnel_inner_ft_supported(mdev);

	/* AF_XDP */
	params->xsk = xsk;
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, mdev->device);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	netdev->mpls_features |= NETIF_F_SG;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->mpls_features |= NETIF_F_TSO;
	netdev->mpls_features |= NETIF_F_TSO6;

	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
	    MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	netdev->hw_features          |= NETIF_F_GSO_PARTIAL;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features          |= NETIF_F_GSO_UDP_L4;
	netdev->features             |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

	/* prefer CQE compression over rxhash */
	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
		netdev->features &= ~NETIF_F_RXHASH;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
		netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
	mlx5e_ipsec_build_netdev(priv);
	mlx5e_tls_build_netdev(priv);
}

void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}

	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
		priv->drop_rq_q_counter = 0;
	}
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	if (priv->q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

	if (priv->drop_rq_q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}

static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
			       mlx5e_get_netdev_max_channels(netdev),
			       netdev->mtu);

	mlx5e_timestamp_init(priv);

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	err = mlx5e_tls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_build_tc2txq_maps(priv);

	return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_tls_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, true);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_direct_rqts(priv, priv->xsk_tir);
	if (unlikely(err))
		goto err_destroy_direct_tirs;

	err = mlx5e_create_direct_tirs(priv, priv->xsk_tir);
	if (unlikely(err))
		goto err_destroy_xsk_rqts;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_xsk_tirs;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_xsk_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
err_destroy_xsk_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
	mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, true);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}

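/* Note: the error-unwind labels in mlx5e_init_nic_rx() tear resources down
 * in exact reverse order of creation, and mlx5e_cleanup_nic_rx() mirrors
 * the same sequence, so each object is destroyed only after everything
 * that references it.
 */
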
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	mlx5e_tx_reporter_create(priv);
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

a90f88fe
GT
5159int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
5160{
5161 return mlx5e_refresh_tirs(priv, false);
5162}
5163
6bfd390b
HHZ
5164static const struct mlx5e_profile mlx5e_nic_profile = {
5165 .init = mlx5e_nic_init,
5166 .cleanup = mlx5e_nic_cleanup,
5167 .init_rx = mlx5e_init_nic_rx,
5168 .cleanup_rx = mlx5e_cleanup_nic_rx,
5169 .init_tx = mlx5e_init_nic_tx,
5170 .cleanup_tx = mlx5e_cleanup_nic_tx,
5171 .enable = mlx5e_nic_enable,
5172 .disable = mlx5e_nic_disable,
a90f88fe 5173 .update_rx = mlx5e_update_nic_rx,
3834a5e6 5174 .update_stats = mlx5e_update_ndo_stats,
7ca42c80 5175 .update_carrier = mlx5e_update_carrier,
20fd0c19
SM
5176 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
5177 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
6bfd390b
HHZ
5178 .max_tc = MLX5E_MAX_NUM_TC,
5179};
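/* The mlx5e_profile is the driver's main extension point: the native NIC,
 * eswitch representors, etc. share the channel machinery by supplying their
 * own callback table. As a rough sketch (the "my_*" names are invented for
 * illustration and not part of the driver), a minimal profile fills in the
 * callbacks that mlx5e_create_netdev()/mlx5e_attach_netdev() below invoke
 * unconditionally:
 *
 *	static const struct mlx5e_profile my_profile = {
 *		.init       = my_init,
 *		.cleanup    = my_cleanup,
 *		.init_rx    = my_init_rx,
 *		.cleanup_rx = my_cleanup_rx,
 *		.init_tx    = my_init_tx,
 *		.cleanup_tx = my_cleanup_tx,
 *		.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
 *		.max_tc     = 1,
 *	};
 *
 * .enable, .disable and .cleanup are NULL-checked by their callers and may be
 * left out; init/init_rx/init_tx and their cleanup_* counterparts are called
 * unconditionally.
 */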
5180
2c3b5bee
SM
5181/* mlx5e generic netdev management API (TODO: move to en_common.c) */
5182
182570b2 5183/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
519a0bf5
SM
5184int mlx5e_netdev_init(struct net_device *netdev,
5185 struct mlx5e_priv *priv,
5186 struct mlx5_core_dev *mdev,
5187 const struct mlx5e_profile *profile,
5188 void *ppriv)
182570b2 5189{
519a0bf5
SM
5190 /* priv init */
5191 priv->mdev = mdev;
5192 priv->netdev = netdev;
5193 priv->profile = profile;
5194 priv->ppriv = ppriv;
5195 priv->msglevel = MLX5E_MSG_LEVEL;
5196 priv->max_opened_tc = 1;
182570b2 5197
519a0bf5
SM
5198 mutex_init(&priv->state_lock);
5199 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
5200 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
5201 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
cdeef2b1 5202 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
303211b4 5203
182570b2
FD
5204 priv->wq = create_singlethread_workqueue("mlx5e");
5205 if (!priv->wq)
5206 return -ENOMEM;
5207
519a0bf5
SM
5208 /* netdev init */
5209 netif_carrier_off(netdev);
5210
5211#ifdef CONFIG_MLX5_EN_ARFS
f2f3df55 5212 netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
519a0bf5
SM
5213#endif
5214
182570b2
FD
5215 return 0;
5216}
5217
5218void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
5219{
5220 destroy_workqueue(priv->wq);
5221}
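/* mlx5e_netdev_init()/mlx5e_netdev_cleanup() deal purely in software state:
 * priv fields, locks, work items and the per-device "mlx5e" workqueue. No
 * hardware objects are created here; that is left to the profile's
 * init_tx/init_rx callbacks.
 */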
5222
26e59d80
MHY
5223struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
5224 const struct mlx5e_profile *profile,
779d986d 5225 int nch,
26e59d80 5226 void *ppriv)
f62b8bb8
AV
5227{
5228 struct net_device *netdev;
182570b2 5229 int err;
f62b8bb8 5230
08fb1dac 5231 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
6bfd390b 5232 nch * profile->max_tc,
db05815b 5233 nch * MLX5E_NUM_RQ_GROUPS);
f62b8bb8
AV
5234 if (!netdev) {
5235 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
5236 return NULL;
5237 }
5238
182570b2
FD
5239 err = profile->init(mdev, netdev, profile, ppriv);
5240 if (err) {
5241 mlx5_core_err(mdev, "failed to init mlx5e profile, %d\n", err);
5242 goto err_free_netdev;
5243 }
26e59d80
MHY
5244
5245 return netdev;
5246
182570b2 5247err_free_netdev:
26e59d80
MHY
5248 free_netdev(netdev);
5249
5250 return NULL;
5251}
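/* The queue counts passed to alloc_etherdev_mqs() are upper bounds:
 * nch * max_tc TX queues and nch * MLX5E_NUM_RQ_GROUPS RX queues (one RQ
 * group for regular traffic, one for XSK). The real queue counts are set
 * later, at channel open time, and may shrink but can never exceed these.
 */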
5252
2c3b5bee 5253int mlx5e_attach_netdev(struct mlx5e_priv *priv)
26e59d80
MHY
5254{
5255 const struct mlx5e_profile *profile;
a1f240f1 5256 int max_nch;
26e59d80
MHY
5257 int err;
5258
26e59d80
MHY
5259 profile = priv->profile;
5260 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
7bb29755 5261
a1f240f1
YA
5262 /* max number of channels may have changed */
5263 max_nch = mlx5e_get_max_num_channels(priv->mdev);
5264 if (priv->channels.params.num_channels > max_nch) {
5265 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
5266 priv->channels.params.num_channels = max_nch;
bbeb53b8 5267 mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
a1f240f1
YA
5268 MLX5E_INDIR_RQT_SIZE, max_nch);
5269 }
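	/* When clamping the channel count, the RSS indirection table has to
	 * be regenerated too: its existing entries may reference channel
	 * indices at or above the new max_nch.
	 */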
5270
6bfd390b
HHZ
5271 err = profile->init_tx(priv);
5272 if (err)
ec8b9981 5273 goto out;
5c50368f 5274
6bfd390b
HHZ
5275 err = profile->init_rx(priv);
5276 if (err)
1462e48d 5277 goto err_cleanup_tx;
5c50368f 5278
6bfd390b
HHZ
5279 if (profile->enable)
5280 profile->enable(priv);
f62b8bb8 5281
26e59d80 5282 return 0;
5c50368f 5283
1462e48d 5284err_cleanup_tx:
6bfd390b 5285 profile->cleanup_tx(priv);
5c50368f 5286
26e59d80
MHY
5287out:
5288 return err;
f62b8bb8
AV
5289}
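/* Attach brings TX up before RX (TISes before RQTs/TIRs and steering), calls
 * the optional profile->enable() only once both are ready, and unwinds in
 * reverse on failure.
 */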
5290
2c3b5bee 5291void mlx5e_detach_netdev(struct mlx5e_priv *priv)
26e59d80 5292{
26e59d80
MHY
5293 const struct mlx5e_profile *profile = priv->profile;
5294
5295 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
26e59d80 5296
37f304d1
SM
5297 if (profile->disable)
5298 profile->disable(priv);
5299 flush_workqueue(priv->wq);
5300
26e59d80 5301 profile->cleanup_rx(priv);
26e59d80 5302 profile->cleanup_tx(priv);
cdeef2b1 5303 cancel_work_sync(&priv->update_stats_work);
26e59d80
MHY
5304}
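/* Detach mirrors attach in reverse: set MLX5E_STATE_DESTROYING so async work
 * stops rearming, quiesce via profile->disable() and flush_workqueue(), then
 * tear down RX before TX.
 */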
5305
2c3b5bee
SM
5306void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
5307{
5308 const struct mlx5e_profile *profile = priv->profile;
5309 struct net_device *netdev = priv->netdev;
5310
2c3b5bee
SM
5311 if (profile->cleanup)
5312 profile->cleanup(priv);
5313 free_netdev(netdev);
5314}
5315
26e59d80
MHY
5316/* mlx5e_attach and mlx5e_detach should be limited to creating/destroying
5317 * hardware contexts and connecting them to the current netdev.
5318 */
5319static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
5320{
5321 struct mlx5e_priv *priv = vpriv;
5322 struct net_device *netdev = priv->netdev;
5323 int err;
5324
5325 if (netif_device_present(netdev))
5326 return 0;
5327
5328 err = mlx5e_create_mdev_resources(mdev);
5329 if (err)
5330 return err;
5331
2c3b5bee 5332 err = mlx5e_attach_netdev(priv);
26e59d80
MHY
5333 if (err) {
5334 mlx5e_destroy_mdev_resources(mdev);
5335 return err;
5336 }
5337
5338 return 0;
5339}
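/* mlx5e_attach()/mlx5e_detach() are invoked by the mlx5 core (e.g. around
 * reset and PCI error-recovery flows); the netif_device_present() checks make
 * both idempotent, so a second attach or a detach of an already-detached
 * netdev is a harmless no-op.
 */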
5340
5341static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
5342{
5343 struct mlx5e_priv *priv = vpriv;
5344 struct net_device *netdev = priv->netdev;
5345
47c9d2c9
AH
5346#ifdef CONFIG_MLX5_ESWITCH
5347 if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
5348 return;
5349#endif
5350
26e59d80
MHY
5351 if (!netif_device_present(netdev))
5352 return;
5353
2c3b5bee 5354 mlx5e_detach_netdev(priv);
26e59d80
MHY
5355 mlx5e_destroy_mdev_resources(mdev);
5356}
5357
b50d292b
HHZ
5358static void *mlx5e_add(struct mlx5_core_dev *mdev)
5359{
07c9f1e5 5360 struct net_device *netdev;
26e59d80 5361 void *priv;
26e59d80 5362 int err;
779d986d 5363 int nch;
b50d292b 5364
26e59d80
MHY
5365 err = mlx5e_check_required_hca_cap(mdev);
5366 if (err)
b50d292b
HHZ
5367 return NULL;
5368
aec002f6
OG
5369#ifdef CONFIG_MLX5_ESWITCH
5370 if (MLX5_ESWITCH_MANAGER(mdev) &&
f6455de0 5371 mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
aec002f6
OG
5372 mlx5e_rep_register_vport_reps(mdev);
5373 return mdev;
5374 }
5375#endif
5376
779d986d 5377 nch = mlx5e_get_max_num_channels(mdev);
13e509a4 5378 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
26e59d80
MHY
5379 if (!netdev) {
5380 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
13e509a4 5381 return NULL;
26e59d80
MHY
5382 }
5383
5384 priv = netdev_priv(netdev);
5385
5386 err = mlx5e_attach(mdev, priv);
5387 if (err) {
5388 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
5389 goto err_destroy_netdev;
5390 }
5391
5392 err = register_netdev(netdev);
5393 if (err) {
5394 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
5395 goto err_detach;
b50d292b 5396 }
26e59d80 5397
2a5e7a13
HN
5398#ifdef CONFIG_MLX5_CORE_EN_DCB
5399 mlx5e_dcbnl_init_app(priv);
5400#endif
26e59d80
MHY
5401 return priv;
5402
5403err_detach:
5404 mlx5e_detach(mdev, priv);
26e59d80 5405err_destroy_netdev:
2c3b5bee 5406 mlx5e_destroy_netdev(priv);
26e59d80 5407 return NULL;
b50d292b
HHZ
5408}
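/* Probe order in mlx5e_add(): verify HCA caps, short-circuit to representor
 * mode when the eswitch is in switchdev mode (returning mdev itself as the
 * interface priv, which mlx5e_detach()/mlx5e_remove() test for), otherwise
 * create the netdev, attach HW contexts, and only then register_netdev() so
 * the stack never sees a half-initialized device.
 */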
5409
b50d292b
HHZ
5410static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
5411{
aec002f6 5412 struct mlx5e_priv *priv;
127ea380 5413
aec002f6
OG
5414#ifdef CONFIG_MLX5_ESWITCH
5415 if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
5416 mlx5e_rep_unregister_vport_reps(mdev);
5417 return;
5418 }
5419#endif
5420 priv = vpriv;
2a5e7a13
HN
5421#ifdef CONFIG_MLX5_CORE_EN_DCB
5422 mlx5e_dcbnl_delete_app(priv);
5423#endif
5e1e93c7 5424 unregister_netdev(priv->netdev);
26e59d80 5425 mlx5e_detach(mdev, vpriv);
2c3b5bee 5426 mlx5e_destroy_netdev(priv);
b50d292b
HHZ
5427}
5428
f62b8bb8 5429static struct mlx5_interface mlx5e_interface = {
b50d292b
HHZ
5430 .add = mlx5e_add,
5431 .remove = mlx5e_remove,
26e59d80
MHY
5432 .attach = mlx5e_attach,
5433 .detach = mlx5e_detach,
f62b8bb8 5434 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
f62b8bb8
AV
5435};
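/* mlx5_register_interface() calls .add for mlx5 devices already present as
 * well as for devices probed later, so mlx5e_init() below is all that is
 * needed to bind the ethernet personality to every port exposing
 * MLX5_INTERFACE_PROTOCOL_ETH.
 */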
5436
5437void mlx5e_init(void)
5438{
2ac9cfe7 5439 mlx5e_ipsec_build_inverse_table();
665bc539 5440 mlx5e_build_ptys2ethtool_map();
f62b8bb8
AV
5441 mlx5_register_interface(&mlx5e_interface);
5442}
5443
5444void mlx5e_cleanup(void)
5445{
5446 mlx5_unregister_interface(&mlx5e_interface);
5447}