git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/ethernet/mellanox/mlx5/core/en_main.c
net/mlx5e: Rx, Fix checksum calculation for new hardware
1 /*
2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <net/tc_act/tc_gact.h>
34 #include <net/pkt_cls.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <linux/bpf.h>
38 #include "eswitch.h"
39 #include "en.h"
40 #include "en_tc.h"
41 #include "en_rep.h"
42 #include "en_accel/ipsec.h"
43 #include "en_accel/ipsec_rxtx.h"
44 #include "accel/ipsec.h"
45 #include "vxlan.h"
46
47 struct mlx5e_rq_param {
48 u32 rqc[MLX5_ST_SZ_DW(rqc)];
49 struct mlx5_wq_param wq;
50 };
51
52 struct mlx5e_sq_param {
53 u32 sqc[MLX5_ST_SZ_DW(sqc)];
54 struct mlx5_wq_param wq;
55 };
56
57 struct mlx5e_cq_param {
58 u32 cqc[MLX5_ST_SZ_DW(cqc)];
59 struct mlx5_wq_param wq;
60 u16 eq_ix;
61 u8 cq_period_mode;
62 };
63
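/*
 * Per-channel creation parameters; built once per configuration in
 * mlx5e_build_channel_param() and reused for every channel that is opened.
 */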
64 struct mlx5e_channel_param {
65 struct mlx5e_rq_param rq;
66 struct mlx5e_sq_param sq;
67 struct mlx5e_sq_param xdp_sq;
68 struct mlx5e_sq_param icosq;
69 struct mlx5e_cq_param rx_cq;
70 struct mlx5e_cq_param tx_cq;
71 struct mlx5e_cq_param icosq_cq;
72 };
73
74 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
75 {
76 return MLX5_CAP_GEN(mdev, striding_rq) &&
77 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
78 MLX5_CAP_ETH(mdev, reg_umr_sq);
79 }
80
81 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
82 struct mlx5e_params *params, u8 rq_type)
83 {
84 params->rq_wq_type = rq_type;
85 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
86 switch (params->rq_wq_type) {
87 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
88 params->log_rq_size = is_kdump_kernel() ?
89 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
90 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
91 params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
92 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
93 params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
94 params->mpwqe_log_stride_sz;
95 break;
96 default: /* MLX5_WQ_TYPE_LINKED_LIST */
97 params->log_rq_size = is_kdump_kernel() ?
98 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
99 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
100 params->rq_headroom = params->xdp_prog ?
101 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
102 params->rq_headroom += NET_IP_ALIGN;
103
104 /* Extra room needed for build_skb */
105 params->lro_wqe_sz -= params->rq_headroom +
106 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
107 }
108
109 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
110 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
111 BIT(params->log_rq_size),
112 BIT(params->mpwqe_log_stride_sz),
113 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
114 }
115
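/*
 * RQ type selection and sizing: mlx5e_init_rq_type_params() above fixes the
 * per-type defaults; for the striding (multi-packet) RQ the stride and
 * num-strides logs always sum to MLX5_MPWRQ_LOG_WQE_SZ, i.e. one WQE spans a
 * whole multi-packet buffer.  mlx5e_set_rq_params() below picks striding RQ
 * only when the device advertises the capability and neither XDP nor IPSec
 * crypto offload is in use; otherwise it falls back to the linked-list RQ.
 */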
116 static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
117 struct mlx5e_params *params)
118 {
119 u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
120 !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
121 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
122 MLX5_WQ_TYPE_LINKED_LIST;
123 mlx5e_init_rq_type_params(mdev, params, rq_type);
124 }
125
126 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
127 {
128 struct mlx5_core_dev *mdev = priv->mdev;
129 u8 port_state;
130
131 port_state = mlx5_query_vport_state(mdev,
132 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
133 0);
134
135 if (port_state == VPORT_STATE_UP) {
136 netdev_info(priv->netdev, "Link up\n");
137 netif_carrier_on(priv->netdev);
138 } else {
139 netdev_info(priv->netdev, "Link down\n");
140 netif_carrier_off(priv->netdev);
141 }
142 }
143
144 static void mlx5e_update_carrier_work(struct work_struct *work)
145 {
146 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
147 update_carrier_work);
148
149 mutex_lock(&priv->state_lock);
150 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
151 if (priv->profile->update_carrier)
152 priv->profile->update_carrier(priv);
153 mutex_unlock(&priv->state_lock);
154 }
155
156 static void mlx5e_tx_timeout_work(struct work_struct *work)
157 {
158 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
159 tx_timeout_work);
160 int err;
161
162 rtnl_lock();
163 mutex_lock(&priv->state_lock);
164 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
165 goto unlock;
166 mlx5e_close_locked(priv->netdev);
167 err = mlx5e_open_locked(priv->netdev);
168 if (err)
169 netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
170 err);
171 unlock:
172 mutex_unlock(&priv->state_lock);
173 rtnl_unlock();
174 }
175
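/*
 * Fold the per-ring RQ/SQ software counters into a stack-local scratch copy
 * and publish it with a single memcpy() at the end, so readers of
 * priv->stats.sw do not see a partially updated snapshot.
 */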
176 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
177 {
178 struct mlx5e_sw_stats temp, *s = &temp;
179 struct mlx5e_rq_stats *rq_stats;
180 struct mlx5e_sq_stats *sq_stats;
181 int i, j;
182
183 memset(s, 0, sizeof(*s));
184 for (i = 0; i < priv->channels.num; i++) {
185 struct mlx5e_channel *c = priv->channels.c[i];
186
187 rq_stats = &c->rq.stats;
188
189 s->rx_packets += rq_stats->packets;
190 s->rx_bytes += rq_stats->bytes;
191 s->rx_lro_packets += rq_stats->lro_packets;
192 s->rx_lro_bytes += rq_stats->lro_bytes;
193 s->rx_ecn_mark += rq_stats->ecn_mark;
194 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
195 s->rx_csum_none += rq_stats->csum_none;
196 s->rx_csum_complete += rq_stats->csum_complete;
197 s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
198 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
199 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
200 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
201 s->rx_xdp_drop += rq_stats->xdp_drop;
202 s->rx_xdp_tx += rq_stats->xdp_tx;
203 s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
204 s->rx_wqe_err += rq_stats->wqe_err;
205 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
206 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
207 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
208 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
209 s->rx_page_reuse += rq_stats->page_reuse;
210 s->rx_cache_reuse += rq_stats->cache_reuse;
211 s->rx_cache_full += rq_stats->cache_full;
212 s->rx_cache_empty += rq_stats->cache_empty;
213 s->rx_cache_busy += rq_stats->cache_busy;
214 s->rx_cache_waive += rq_stats->cache_waive;
215
216 for (j = 0; j < priv->channels.params.num_tc; j++) {
217 sq_stats = &c->sq[j].stats;
218
219 s->tx_packets += sq_stats->packets;
220 s->tx_bytes += sq_stats->bytes;
221 s->tx_tso_packets += sq_stats->tso_packets;
222 s->tx_tso_bytes += sq_stats->tso_bytes;
223 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
224 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
225 s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
226 s->tx_queue_stopped += sq_stats->stopped;
227 s->tx_queue_wake += sq_stats->wake;
228 s->tx_queue_dropped += sq_stats->dropped;
229 s->tx_xmit_more += sq_stats->xmit_more;
230 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
231 s->tx_csum_none += sq_stats->csum_none;
232 s->tx_csum_partial += sq_stats->csum_partial;
233 }
234 }
235
236 s->link_down_events_phy = MLX5_GET(ppcnt_reg,
237 priv->stats.pport.phy_counters,
238 counter_set.phys_layer_cntrs.link_down_events);
239 memcpy(&priv->stats.sw, s, sizeof(*s));
240 }
241
242 static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
243 {
244 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
245 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
246 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
247 struct mlx5_core_dev *mdev = priv->mdev;
248
249 MLX5_SET(query_vport_counter_in, in, opcode,
250 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
251 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
252 MLX5_SET(query_vport_counter_in, in, other_vport, 0);
253
254 mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
255 }
256
257 static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
258 {
259 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
260 struct mlx5_core_dev *mdev = priv->mdev;
261 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
262 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
263 int prio;
264 void *out;
265
266 MLX5_SET(ppcnt_reg, in, local_port, 1);
267
268 out = pstats->IEEE_802_3_counters;
269 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
270 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
271
272 if (!full)
273 return;
274
275 out = pstats->RFC_2863_counters;
276 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
277 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
278
279 out = pstats->RFC_2819_counters;
280 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
281 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
282
283 out = pstats->phy_counters;
284 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
285 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
286
287 if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
288 out = pstats->phy_statistical_counters;
289 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
290 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
291 }
292
293 if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
294 out = pstats->eth_ext_counters;
295 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
296 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
297 }
298
299 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
300 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
301 out = pstats->per_prio_counters[prio];
302 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
303 mlx5_core_access_reg(mdev, in, sz, out, sz,
304 MLX5_REG_PPCNT, 0, 0);
305 }
306 }
307
308 static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
309 {
310 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
311 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
312 int err;
313
314 if (!priv->q_counter)
315 return;
316
317 err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
318 if (err)
319 return;
320
321 qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
322 }
323
324 static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
325 {
326 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
327 struct mlx5_core_dev *mdev = priv->mdev;
328 u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
329 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
330 void *out;
331
332 if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
333 return;
334
335 out = pcie_stats->pcie_perf_counters;
336 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
337 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
338 }
339
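/*
 * A "full" update additionally refreshes the PCIe and IPSec counters and the
 * optional PPCNT groups; mlx5e_update_ndo_stats() below uses the cheaper
 * full = false variant.
 */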
340 void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
341 {
342 if (full) {
343 mlx5e_update_pcie_counters(priv);
344 mlx5e_ipsec_update_stats(priv);
345 }
346 mlx5e_update_pport_counters(priv, full);
347 mlx5e_update_vport_counters(priv);
348 mlx5e_update_q_counter(priv);
349 mlx5e_update_sw_counters(priv);
350 }
351
352 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
353 {
354 mlx5e_update_stats(priv, false);
355 }
356
357 void mlx5e_update_stats_work(struct work_struct *work)
358 {
359 struct delayed_work *dwork = to_delayed_work(work);
360 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
361 update_stats_work);
362 mutex_lock(&priv->state_lock);
363 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
364 priv->profile->update_stats(priv);
365 queue_delayed_work(priv->wq, dwork,
366 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
367 }
368 mutex_unlock(&priv->state_lock);
369 }
370
371 static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
372 enum mlx5_dev_event event, unsigned long param)
373 {
374 struct mlx5e_priv *priv = vpriv;
375
376 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
377 return;
378
379 switch (event) {
380 case MLX5_DEV_EVENT_PORT_UP:
381 case MLX5_DEV_EVENT_PORT_DOWN:
382 queue_work(priv->wq, &priv->update_carrier_work);
383 break;
384 default:
385 break;
386 }
387 }
388
389 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
390 {
391 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
392 }
393
394 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
395 {
396 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
397 synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
398 }
399
400 static inline int mlx5e_get_wqe_mtt_sz(void)
401 {
402 /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
403 * To avoid copying garbage after the mtt array, we allocate
404 * a little more.
405 */
406 return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
407 MLX5_UMR_MTT_ALIGNMENT);
408 }
409
410 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
411 struct mlx5e_icosq *sq,
412 struct mlx5e_umr_wqe *wqe,
413 u16 ix)
414 {
415 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
416 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
417 struct mlx5_wqe_data_seg *dseg = &wqe->data;
418 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
419 u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
420 u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
421
422 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
423 ds_cnt);
424 cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
425 cseg->imm = rq->mkey_be;
426
427 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
428 ucseg->xlt_octowords =
429 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
430 ucseg->bsf_octowords =
431 cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
432 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
433
434 dseg->lkey = sq->mkey_be;
435 dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
436 }
437
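/*
 * Per-WQE UMR bookkeeping: a single buffer (mtt_no_align) holds the MTT
 * arrays for all WQEs, each slot over-allocated by MLX5_UMR_ALIGN - 1 bytes
 * so that wi->umr.mtt can be aligned with PTR_ALIGN() before being
 * DMA-mapped towards the device.
 */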
438 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
439 struct mlx5e_channel *c)
440 {
441 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
442 int mtt_sz = mlx5e_get_wqe_mtt_sz();
443 int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
444 int i;
445
446 rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
447 GFP_KERNEL, cpu_to_node(c->cpu));
448 if (!rq->mpwqe.info)
449 goto err_out;
450
451 /* We allocate more than mtt_sz as we will align the pointer */
452 rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
453 cpu_to_node(c->cpu));
454 if (unlikely(!rq->mpwqe.mtt_no_align))
455 goto err_free_wqe_info;
456
457 for (i = 0; i < wq_sz; i++) {
458 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
459
460 wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
461 MLX5_UMR_ALIGN);
462 wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
463 PCI_DMA_TODEVICE);
464 if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
465 goto err_unmap_mtts;
466
467 mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
468 }
469
470 return 0;
471
472 err_unmap_mtts:
473 while (--i >= 0) {
474 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
475
476 dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
477 PCI_DMA_TODEVICE);
478 }
479 kfree(rq->mpwqe.mtt_no_align);
480 err_free_wqe_info:
481 kfree(rq->mpwqe.info);
482
483 err_out:
484 return -ENOMEM;
485 }
486
487 static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
488 {
489 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
490 int mtt_sz = mlx5e_get_wqe_mtt_sz();
491 int i;
492
493 for (i = 0; i < wq_sz; i++) {
494 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
495
496 dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
497 PCI_DMA_TODEVICE);
498 }
499 kfree(rq->mpwqe.mtt_no_align);
500 kfree(rq->mpwqe.info);
501 }
502
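/*
 * The UMR mkey created here starts out "free" (no valid translation); the RX
 * path later posts UMR WQEs (built in mlx5e_build_umr_wqe() above) on the
 * ICOSQ to map the pages backing each striding-RQ WQE into it.
 */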
503 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
504 u64 npages, u8 page_shift,
505 struct mlx5_core_mkey *umr_mkey)
506 {
507 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
508 void *mkc;
509 u32 *in;
510 int err;
511
512 if (!MLX5E_VALID_NUM_MTTS(npages))
513 return -EINVAL;
514
515 in = kvzalloc(inlen, GFP_KERNEL);
516 if (!in)
517 return -ENOMEM;
518
519 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
520
521 MLX5_SET(mkc, mkc, free, 1);
522 MLX5_SET(mkc, mkc, umr_en, 1);
523 MLX5_SET(mkc, mkc, lw, 1);
524 MLX5_SET(mkc, mkc, lr, 1);
525 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
526
527 MLX5_SET(mkc, mkc, qpn, 0xffffff);
528 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
529 MLX5_SET64(mkc, mkc, len, npages << page_shift);
530 MLX5_SET(mkc, mkc, translations_octword_size,
531 MLX5_MTT_OCTW(npages));
532 MLX5_SET(mkc, mkc, log_page_size, page_shift);
533
534 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
535
536 kvfree(in);
537 return err;
538 }
539
540 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
541 {
542 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
543
544 return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
545 }
546
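/*
 * Allocate the RQ work queue and all per-RQ software state.  The striding-RQ
 * branch sets up the UMR mkey and the MPWQE info array; the linked-list
 * branch sizes the per-fragment buffer (and page order) from the MTU or LRO
 * WQE size.  The hardware RQ object itself is created later in
 * mlx5e_create_rq().
 */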
547 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
548 struct mlx5e_params *params,
549 struct mlx5e_rq_param *rqp,
550 struct mlx5e_rq *rq)
551 {
552 struct mlx5_core_dev *mdev = c->mdev;
553 void *rqc = rqp->rqc;
554 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
555 u32 byte_count;
556 int npages;
557 int wq_sz;
558 int err;
559 int i;
560
561 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
562
563 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
564 &rq->wq_ctrl);
565 if (err)
566 return err;
567
568 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
569
570 wq_sz = mlx5_wq_ll_get_size(&rq->wq);
571
572 rq->wq_type = params->rq_wq_type;
573 rq->pdev = c->pdev;
574 rq->netdev = c->netdev;
575 rq->tstamp = c->tstamp;
576 rq->clock = &mdev->clock;
577 rq->channel = c;
578 rq->ix = c->ix;
579 rq->mdev = mdev;
580
581 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
582 if (IS_ERR(rq->xdp_prog)) {
583 err = PTR_ERR(rq->xdp_prog);
584 rq->xdp_prog = NULL;
585 goto err_rq_wq_destroy;
586 }
587
588 rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
589 rq->buff.headroom = params->rq_headroom;
590
591 switch (rq->wq_type) {
592 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
593
594 rq->post_wqes = mlx5e_post_rx_mpwqes;
595 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
596
597 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
598 #ifdef CONFIG_MLX5_EN_IPSEC
599 if (MLX5_IPSEC_DEV(mdev)) {
600 err = -EINVAL;
601 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
602 goto err_rq_wq_destroy;
603 }
604 #endif
605 if (!rq->handle_rx_cqe) {
606 err = -EINVAL;
607 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
608 goto err_rq_wq_destroy;
609 }
610
611 rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
612 rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
613
614 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
615
616 err = mlx5e_create_rq_umr_mkey(mdev, rq);
617 if (err)
618 goto err_rq_wq_destroy;
619 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
620
621 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
622 if (err)
623 goto err_destroy_umr_mkey;
624 break;
625 default: /* MLX5_WQ_TYPE_LINKED_LIST */
626 rq->wqe.frag_info =
627 kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
628 GFP_KERNEL, cpu_to_node(c->cpu));
629 if (!rq->wqe.frag_info) {
630 err = -ENOMEM;
631 goto err_rq_wq_destroy;
632 }
633 rq->post_wqes = mlx5e_post_rx_wqes;
634 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
635
636 #ifdef CONFIG_MLX5_EN_IPSEC
637 if (c->priv->ipsec)
638 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
639 else
640 #endif
641 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
642 if (!rq->handle_rx_cqe) {
643 kfree(rq->wqe.frag_info);
644 err = -EINVAL;
645 netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
646 goto err_rq_wq_destroy;
647 }
648
649 byte_count = params->lro_en ?
650 params->lro_wqe_sz :
651 MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
652 #ifdef CONFIG_MLX5_EN_IPSEC
653 if (MLX5_IPSEC_DEV(mdev))
654 byte_count += MLX5E_METADATA_ETHER_LEN;
655 #endif
656 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
657
658 /* calc the required page order */
659 rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
660 npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
661 rq->buff.page_order = order_base_2(npages);
662
663 byte_count |= MLX5_HW_START_PADDING;
664 rq->mkey_be = c->mkey_be;
665 }
666
667 for (i = 0; i < wq_sz; i++) {
668 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
669
670 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
671 u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
672
673 wqe->data.addr = cpu_to_be64(dma_offset);
674 }
675
676 wqe->data.byte_count = cpu_to_be32(byte_count);
677 wqe->data.lkey = rq->mkey_be;
678 }
679
680 INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
681 rq->am.mode = params->rx_cq_moderation.cq_period_mode;
682 rq->page_cache.head = 0;
683 rq->page_cache.tail = 0;
684
685 return 0;
686
687 err_destroy_umr_mkey:
688 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
689
690 err_rq_wq_destroy:
691 if (rq->xdp_prog)
692 bpf_prog_put(rq->xdp_prog);
693 mlx5_wq_destroy(&rq->wq_ctrl);
694
695 return err;
696 }
697
698 static void mlx5e_free_rq(struct mlx5e_rq *rq)
699 {
700 int i;
701
702 if (rq->xdp_prog)
703 bpf_prog_put(rq->xdp_prog);
704
705 switch (rq->wq_type) {
706 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
707 mlx5e_rq_free_mpwqe_info(rq);
708 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
709 break;
710 default: /* MLX5_WQ_TYPE_LINKED_LIST */
711 kfree(rq->wqe.frag_info);
712 }
713
714 for (i = rq->page_cache.head; i != rq->page_cache.tail;
715 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
716 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
717
718 mlx5e_page_release(rq, dma_info, false);
719 }
720 mlx5_wq_destroy(&rq->wq_ctrl);
721 }
722
723 static int mlx5e_create_rq(struct mlx5e_rq *rq,
724 struct mlx5e_rq_param *param)
725 {
726 struct mlx5_core_dev *mdev = rq->mdev;
727
728 void *in;
729 void *rqc;
730 void *wq;
731 int inlen;
732 int err;
733
734 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
735 sizeof(u64) * rq->wq_ctrl.buf.npages;
736 in = kvzalloc(inlen, GFP_KERNEL);
737 if (!in)
738 return -ENOMEM;
739
740 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
741 wq = MLX5_ADDR_OF(rqc, rqc, wq);
742
743 memcpy(rqc, param->rqc, sizeof(param->rqc));
744
745 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
746 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
747 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
748 MLX5_ADAPTER_PAGE_SHIFT);
749 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
750
751 mlx5_fill_page_array(&rq->wq_ctrl.buf,
752 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
753
754 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
755
756 kvfree(in);
757
758 return err;
759 }
760
761 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
762 int next_state)
763 {
764 struct mlx5e_channel *c = rq->channel;
765 struct mlx5_core_dev *mdev = c->mdev;
766
767 void *in;
768 void *rqc;
769 int inlen;
770 int err;
771
772 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
773 in = kvzalloc(inlen, GFP_KERNEL);
774 if (!in)
775 return -ENOMEM;
776
777 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
778
779 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
780 MLX5_SET(rqc, rqc, state, next_state);
781
782 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
783
784 kvfree(in);
785
786 return err;
787 }
788
789 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
790 {
791 struct mlx5e_channel *c = rq->channel;
792 struct mlx5e_priv *priv = c->priv;
793 struct mlx5_core_dev *mdev = priv->mdev;
794
795 void *in;
796 void *rqc;
797 int inlen;
798 int err;
799
800 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
801 in = kvzalloc(inlen, GFP_KERNEL);
802 if (!in)
803 return -ENOMEM;
804
805 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
806
807 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
808 MLX5_SET64(modify_rq_in, in, modify_bitmask,
809 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
810 MLX5_SET(rqc, rqc, scatter_fcs, enable);
811 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
812
813 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
814
815 kvfree(in);
816
817 return err;
818 }
819
820 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
821 {
822 struct mlx5e_channel *c = rq->channel;
823 struct mlx5_core_dev *mdev = c->mdev;
824 void *in;
825 void *rqc;
826 int inlen;
827 int err;
828
829 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
830 in = kvzalloc(inlen, GFP_KERNEL);
831 if (!in)
832 return -ENOMEM;
833
834 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
835
836 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
837 MLX5_SET64(modify_rq_in, in, modify_bitmask,
838 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
839 MLX5_SET(rqc, rqc, vsd, vsd);
840 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
841
842 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
843
844 kvfree(in);
845
846 return err;
847 }
848
849 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
850 {
851 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
852 }
853
854 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
855 {
856 unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
857 struct mlx5e_channel *c = rq->channel;
858
859 struct mlx5_wq_ll *wq = &rq->wq;
860 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
861
862 while (time_before(jiffies, exp_time)) {
863 if (wq->cur_sz >= min_wqes)
864 return 0;
865
866 msleep(20);
867 }
868
869 netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
870 rq->rqn, wq->cur_sz, min_wqes);
871 return -ETIMEDOUT;
872 }
873
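/*
 * Drain an RQ that is going down: release a possibly in-flight UMR WQE, pop
 * and free every posted receive WQE, and for linked-list RQs also drop pages
 * still held by the page-reuse scheme.
 */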
874 static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
875 {
876 struct mlx5_wq_ll *wq = &rq->wq;
877 struct mlx5e_rx_wqe *wqe;
878 __be16 wqe_ix_be;
879 u16 wqe_ix;
880
881 /* UMR WQE (if in progress) is always at wq->head */
882 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
883 rq->mpwqe.umr_in_progress)
884 mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
885
886 while (!mlx5_wq_ll_is_empty(wq)) {
887 wqe_ix_be = *wq->tail_next;
888 wqe_ix = be16_to_cpu(wqe_ix_be);
889 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
890 rq->dealloc_wqe(rq, wqe_ix);
891 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
892 &wqe->next.next_wqe_index);
893 }
894
895 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
896 /* Clean outstanding pages on handled WQEs that decided to do page-reuse
897 * but have not yet been re-posted.
898 */
899 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
900
901 for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
902 rq->dealloc_wqe(rq, wqe_ix);
903 }
904 }
905
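/*
 * Open = allocate + create the hardware RQ + transition it RST -> RDY, then
 * latch the per-RQ state bits (adaptive moderation, full CQE checksum
 * capability, and no-csum-complete when XDP is attached) before the RQ is
 * activated.
 */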
906 static int mlx5e_open_rq(struct mlx5e_channel *c,
907 struct mlx5e_params *params,
908 struct mlx5e_rq_param *param,
909 struct mlx5e_rq *rq)
910 {
911 int err;
912
913 err = mlx5e_alloc_rq(c, params, param, rq);
914 if (err)
915 return err;
916
917 err = mlx5e_create_rq(rq, param);
918 if (err)
919 goto err_free_rq;
920
921 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
922 if (err)
923 goto err_destroy_rq;
924
925 if (params->rx_am_enabled)
926 c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
927
928 if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
929 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
930
931 /* We disable csum_complete when XDP is enabled, since
932 * XDP programs might manipulate packets, which would render
933 * skb->checksum incorrect.
934 */
935 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
936 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
937
938 return 0;
939
940 err_destroy_rq:
941 mlx5e_destroy_rq(rq);
942 err_free_rq:
943 mlx5e_free_rq(rq);
944
945 return err;
946 }
947
948 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
949 {
950 struct mlx5e_icosq *sq = &rq->channel->icosq;
951 u16 pi = sq->pc & sq->wq.sz_m1;
952 struct mlx5e_tx_wqe *nopwqe;
953
954 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
955 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
956 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
957 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
958 }
959
960 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
961 {
962 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
963 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
964 }
965
966 static void mlx5e_close_rq(struct mlx5e_rq *rq)
967 {
968 cancel_work_sync(&rq->am.work);
969 mlx5e_destroy_rq(rq);
970 mlx5e_free_rx_descs(rq);
971 mlx5e_free_rq(rq);
972 }
973
974 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
975 {
976 kfree(sq->db.di);
977 }
978
979 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
980 {
981 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
982
983 sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
984 GFP_KERNEL, numa);
985 if (!sq->db.di) {
986 mlx5e_free_xdpsq_db(sq);
987 return -ENOMEM;
988 }
989
990 return 0;
991 }
992
993 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
994 struct mlx5e_params *params,
995 struct mlx5e_sq_param *param,
996 struct mlx5e_xdpsq *sq)
997 {
998 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
999 struct mlx5_core_dev *mdev = c->mdev;
1000 int err;
1001
1002 sq->pdev = c->pdev;
1003 sq->mkey_be = c->mkey_be;
1004 sq->channel = c;
1005 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1006 sq->min_inline_mode = params->tx_min_inline_mode;
1007
1008 param->wq.db_numa_node = cpu_to_node(c->cpu);
1009 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1010 if (err)
1011 return err;
1012 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1013
1014 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1015 if (err)
1016 goto err_sq_wq_destroy;
1017
1018 return 0;
1019
1020 err_sq_wq_destroy:
1021 mlx5_wq_destroy(&sq->wq_ctrl);
1022
1023 return err;
1024 }
1025
1026 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1027 {
1028 mlx5e_free_xdpsq_db(sq);
1029 mlx5_wq_destroy(&sq->wq_ctrl);
1030 }
1031
1032 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1033 {
1034 kfree(sq->db.ico_wqe);
1035 }
1036
1037 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1038 {
1039 u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1040
1041 sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
1042 GFP_KERNEL, numa);
1043 if (!sq->db.ico_wqe)
1044 return -ENOMEM;
1045
1046 return 0;
1047 }
1048
1049 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1050 struct mlx5e_sq_param *param,
1051 struct mlx5e_icosq *sq)
1052 {
1053 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1054 struct mlx5_core_dev *mdev = c->mdev;
1055 int err;
1056
1057 sq->mkey_be = c->mkey_be;
1058 sq->channel = c;
1059 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1060
1061 param->wq.db_numa_node = cpu_to_node(c->cpu);
1062 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1063 if (err)
1064 return err;
1065 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1066
1067 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1068 if (err)
1069 goto err_sq_wq_destroy;
1070
1071 sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
1072
1073 return 0;
1074
1075 err_sq_wq_destroy:
1076 mlx5_wq_destroy(&sq->wq_ctrl);
1077
1078 return err;
1079 }
1080
1081 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1082 {
1083 mlx5e_free_icosq_db(sq);
1084 mlx5_wq_destroy(&sq->wq_ctrl);
1085 }
1086
1087 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1088 {
1089 kfree(sq->db.wqe_info);
1090 kfree(sq->db.dma_fifo);
1091 }
1092
1093 static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1094 {
1095 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1096 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1097
1098 sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
1099 GFP_KERNEL, numa);
1100 sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
1101 GFP_KERNEL, numa);
1102 if (!sq->db.dma_fifo || !sq->db.wqe_info) {
1103 mlx5e_free_txqsq_db(sq);
1104 return -ENOMEM;
1105 }
1106
1107 sq->dma_fifo_mask = df_sz - 1;
1108
1109 return 0;
1110 }
1111
1112 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1113 int txq_ix,
1114 struct mlx5e_params *params,
1115 struct mlx5e_sq_param *param,
1116 struct mlx5e_txqsq *sq)
1117 {
1118 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1119 struct mlx5_core_dev *mdev = c->mdev;
1120 int err;
1121
1122 sq->pdev = c->pdev;
1123 sq->tstamp = c->tstamp;
1124 sq->clock = &mdev->clock;
1125 sq->mkey_be = c->mkey_be;
1126 sq->channel = c;
1127 sq->txq_ix = txq_ix;
1128 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1129 sq->max_inline = params->tx_max_inline;
1130 sq->min_inline_mode = params->tx_min_inline_mode;
1131 if (MLX5_IPSEC_DEV(c->priv->mdev))
1132 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1133
1134 param->wq.db_numa_node = cpu_to_node(c->cpu);
1135 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
1136 if (err)
1137 return err;
1138 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
1139
1140 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1141 if (err)
1142 goto err_sq_wq_destroy;
1143
1144 sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
1145
1146 return 0;
1147
1148 err_sq_wq_destroy:
1149 mlx5_wq_destroy(&sq->wq_ctrl);
1150
1151 return err;
1152 }
1153
1154 static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1155 {
1156 mlx5e_free_txqsq_db(sq);
1157 mlx5_wq_destroy(&sq->wq_ctrl);
1158 }
1159
1160 struct mlx5e_create_sq_param {
1161 struct mlx5_wq_ctrl *wq_ctrl;
1162 u32 cqn;
1163 u32 tisn;
1164 u8 tis_lst_sz;
1165 u8 min_inline_mode;
1166 };
1167
1168 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1169 struct mlx5e_sq_param *param,
1170 struct mlx5e_create_sq_param *csp,
1171 u32 *sqn)
1172 {
1173 void *in;
1174 void *sqc;
1175 void *wq;
1176 int inlen;
1177 int err;
1178
1179 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1180 sizeof(u64) * csp->wq_ctrl->buf.npages;
1181 in = kvzalloc(inlen, GFP_KERNEL);
1182 if (!in)
1183 return -ENOMEM;
1184
1185 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1186 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1187
1188 memcpy(sqc, param->sqc, sizeof(param->sqc));
1189 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1190 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1191 MLX5_SET(sqc, sqc, cqn, csp->cqn);
1192
1193 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1194 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
1195
1196 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1197
1198 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1199 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
1200 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
1201 MLX5_ADAPTER_PAGE_SHIFT);
1202 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
1203
1204 mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1205
1206 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1207
1208 kvfree(in);
1209
1210 return err;
1211 }
1212
1213 struct mlx5e_modify_sq_param {
1214 int curr_state;
1215 int next_state;
1216 bool rl_update;
1217 int rl_index;
1218 };
1219
1220 static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1221 struct mlx5e_modify_sq_param *p)
1222 {
1223 void *in;
1224 void *sqc;
1225 int inlen;
1226 int err;
1227
1228 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1229 in = kvzalloc(inlen, GFP_KERNEL);
1230 if (!in)
1231 return -ENOMEM;
1232
1233 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1234
1235 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1236 MLX5_SET(sqc, sqc, state, p->next_state);
1237 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1238 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
1239 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
1240 }
1241
1242 err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
1243
1244 kvfree(in);
1245
1246 return err;
1247 }
1248
1249 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1250 {
1251 mlx5_core_destroy_sq(mdev, sqn);
1252 }
1253
1254 static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1255 struct mlx5e_sq_param *param,
1256 struct mlx5e_create_sq_param *csp,
1257 u32 *sqn)
1258 {
1259 struct mlx5e_modify_sq_param msp = {0};
1260 int err;
1261
1262 err = mlx5e_create_sq(mdev, param, csp, sqn);
1263 if (err)
1264 return err;
1265
1266 msp.curr_state = MLX5_SQC_STATE_RST;
1267 msp.next_state = MLX5_SQC_STATE_RDY;
1268 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1269 if (err)
1270 mlx5e_destroy_sq(mdev, *sqn);
1271
1272 return err;
1273 }
1274
1275 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1276 struct mlx5e_txqsq *sq, u32 rate);
1277
1278 static int mlx5e_open_txqsq(struct mlx5e_channel *c,
1279 u32 tisn,
1280 int txq_ix,
1281 struct mlx5e_params *params,
1282 struct mlx5e_sq_param *param,
1283 struct mlx5e_txqsq *sq)
1284 {
1285 struct mlx5e_create_sq_param csp = {};
1286 u32 tx_rate;
1287 int err;
1288
1289 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
1290 if (err)
1291 return err;
1292
1293 csp.tisn = tisn;
1294 csp.tis_lst_sz = 1;
1295 csp.cqn = sq->cq.mcq.cqn;
1296 csp.wq_ctrl = &sq->wq_ctrl;
1297 csp.min_inline_mode = sq->min_inline_mode;
1298 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1299 if (err)
1300 goto err_free_txqsq;
1301
1302 tx_rate = c->priv->tx_rates[sq->txq_ix];
1303 if (tx_rate)
1304 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1305
1306 return 0;
1307
1308 err_free_txqsq:
1309 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1310 mlx5e_free_txqsq(sq);
1311
1312 return err;
1313 }
1314
1315 static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1316 {
1317 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1318 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1319 netdev_tx_reset_queue(sq->txq);
1320 netif_tx_start_queue(sq->txq);
1321 }
1322
1323 static inline void netif_tx_disable_queue(struct netdev_queue *txq)
1324 {
1325 __netif_tx_lock_bh(txq);
1326 netif_tx_stop_queue(txq);
1327 __netif_tx_unlock_bh(txq);
1328 }
1329
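/*
 * Quiesce a TXQ SQ: clear ENABLED so the xmit path stops posting, let any
 * in-flight NAPI pass finish, stop the netdev queue, and (if there is room)
 * post one final NOP so the last producer index is doorbelled to hardware.
 */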
1330 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1331 {
1332 struct mlx5e_channel *c = sq->channel;
1333
1334 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1335 /* prevent netif_tx_wake_queue */
1336 napi_synchronize(&c->napi);
1337
1338 netif_tx_disable_queue(sq->txq);
1339
1340 /* last doorbell out, godspeed .. */
1341 if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
1342 struct mlx5e_tx_wqe *nop;
1343
1344 sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
1345 nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
1346 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
1347 }
1348 }
1349
1350 static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1351 {
1352 struct mlx5e_channel *c = sq->channel;
1353 struct mlx5_core_dev *mdev = c->mdev;
1354
1355 mlx5e_destroy_sq(mdev, sq->sqn);
1356 if (sq->rate_limit)
1357 mlx5_rl_remove_rate(mdev, sq->rate_limit);
1358 mlx5e_free_txqsq_descs(sq);
1359 mlx5e_free_txqsq(sq);
1360 }
1361
1362 static int mlx5e_open_icosq(struct mlx5e_channel *c,
1363 struct mlx5e_params *params,
1364 struct mlx5e_sq_param *param,
1365 struct mlx5e_icosq *sq)
1366 {
1367 struct mlx5e_create_sq_param csp = {};
1368 int err;
1369
1370 err = mlx5e_alloc_icosq(c, param, sq);
1371 if (err)
1372 return err;
1373
1374 csp.cqn = sq->cq.mcq.cqn;
1375 csp.wq_ctrl = &sq->wq_ctrl;
1376 csp.min_inline_mode = params->tx_min_inline_mode;
1377 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1378 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1379 if (err)
1380 goto err_free_icosq;
1381
1382 return 0;
1383
1384 err_free_icosq:
1385 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1386 mlx5e_free_icosq(sq);
1387
1388 return err;
1389 }
1390
1391 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1392 {
1393 struct mlx5e_channel *c = sq->channel;
1394
1395 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1396 napi_synchronize(&c->napi);
1397
1398 mlx5e_destroy_sq(c->mdev, sq->sqn);
1399 mlx5e_free_icosq(sq);
1400 }
1401
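/*
 * XDP transmit SQ: its WQEs are mostly constant, so the control and ethernet
 * segments (ds count, inline header size, data lkey) are pre-written for
 * every WQE slot at open time and the hot path largely only fills in the
 * packet address and length.
 */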
1402 static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
1403 struct mlx5e_params *params,
1404 struct mlx5e_sq_param *param,
1405 struct mlx5e_xdpsq *sq)
1406 {
1407 unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1408 struct mlx5e_create_sq_param csp = {};
1409 unsigned int inline_hdr_sz = 0;
1410 int err;
1411 int i;
1412
1413 err = mlx5e_alloc_xdpsq(c, params, param, sq);
1414 if (err)
1415 return err;
1416
1417 csp.tis_lst_sz = 1;
1418 csp.tisn = c->priv->tisn[0]; /* tc = 0 */
1419 csp.cqn = sq->cq.mcq.cqn;
1420 csp.wq_ctrl = &sq->wq_ctrl;
1421 csp.min_inline_mode = sq->min_inline_mode;
1422 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1423 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1424 if (err)
1425 goto err_free_xdpsq;
1426
1427 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1428 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1429 ds_cnt++;
1430 }
1431
1432 /* Pre-initialize fixed WQE fields */
1433 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1434 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1435 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1436 struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
1437 struct mlx5_wqe_data_seg *dseg;
1438
1439 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1440 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1441
1442 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1443 dseg->lkey = sq->mkey_be;
1444 }
1445
1446 return 0;
1447
1448 err_free_xdpsq:
1449 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1450 mlx5e_free_xdpsq(sq);
1451
1452 return err;
1453 }
1454
1455 static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1456 {
1457 struct mlx5e_channel *c = sq->channel;
1458
1459 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1460 napi_synchronize(&c->napi);
1461
1462 mlx5e_destroy_sq(c->mdev, sq->sqn);
1463 mlx5e_free_xdpsq_descs(sq);
1464 mlx5e_free_xdpsq(sq);
1465 }
1466
1467 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1468 struct mlx5e_cq_param *param,
1469 struct mlx5e_cq *cq)
1470 {
1471 struct mlx5_core_cq *mcq = &cq->mcq;
1472 int eqn_not_used;
1473 unsigned int irqn;
1474 int err;
1475 u32 i;
1476
1477 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1478 if (err)
1479 return err;
1480
1481 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1482 &cq->wq_ctrl);
1483 if (err)
1484 return err;
1485
1486 mcq->cqe_sz = 64;
1487 mcq->set_ci_db = cq->wq_ctrl.db.db;
1488 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1489 *mcq->set_ci_db = 0;
1490 *mcq->arm_db = 0;
1491 mcq->vector = param->eq_ix;
1492 mcq->comp = mlx5e_completion_event;
1493 mcq->event = mlx5e_cq_error_event;
1494 mcq->irqn = irqn;
1495
1496 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1497 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1498
1499 cqe->op_own = 0xf1;
1500 }
1501
1502 cq->mdev = mdev;
1503
1504 return 0;
1505 }
1506
1507 static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1508 struct mlx5e_cq_param *param,
1509 struct mlx5e_cq *cq)
1510 {
1511 struct mlx5_core_dev *mdev = c->priv->mdev;
1512 int err;
1513
1514 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1515 param->wq.db_numa_node = cpu_to_node(c->cpu);
1516 param->eq_ix = c->ix;
1517
1518 err = mlx5e_alloc_cq_common(mdev, param, cq);
1519
1520 cq->napi = &c->napi;
1521 cq->channel = c;
1522
1523 return err;
1524 }
1525
1526 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1527 {
1528 mlx5_cqwq_destroy(&cq->wq_ctrl);
1529 }
1530
1531 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1532 {
1533 struct mlx5_core_dev *mdev = cq->mdev;
1534 struct mlx5_core_cq *mcq = &cq->mcq;
1535
1536 void *in;
1537 void *cqc;
1538 int inlen;
1539 unsigned int irqn_not_used;
1540 int eqn;
1541 int err;
1542
1543 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1544 if (err)
1545 return err;
1546
1547 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1548 sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
1549 in = kvzalloc(inlen, GFP_KERNEL);
1550 if (!in)
1551 return -ENOMEM;
1552
1553 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1554
1555 memcpy(cqc, param->cqc, sizeof(param->cqc));
1556
1557 mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
1558 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1559
1560 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1561 MLX5_SET(cqc, cqc, c_eqn, eqn);
1562 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
1563 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
1564 MLX5_ADAPTER_PAGE_SHIFT);
1565 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1566
1567 err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1568
1569 kvfree(in);
1570
1571 if (err)
1572 return err;
1573
1574 mlx5e_cq_arm(cq);
1575
1576 return 0;
1577 }
1578
1579 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1580 {
1581 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1582 }
1583
1584 static int mlx5e_open_cq(struct mlx5e_channel *c,
1585 struct mlx5e_cq_moder moder,
1586 struct mlx5e_cq_param *param,
1587 struct mlx5e_cq *cq)
1588 {
1589 struct mlx5_core_dev *mdev = c->mdev;
1590 int err;
1591
1592 err = mlx5e_alloc_cq(c, param, cq);
1593 if (err)
1594 return err;
1595
1596 err = mlx5e_create_cq(cq, param);
1597 if (err)
1598 goto err_free_cq;
1599
1600 if (MLX5_CAP_GEN(mdev, cq_moderation))
1601 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1602 return 0;
1603
1604 err_free_cq:
1605 mlx5e_free_cq(cq);
1606
1607 return err;
1608 }
1609
1610 static void mlx5e_close_cq(struct mlx5e_cq *cq)
1611 {
1612 mlx5e_destroy_cq(cq);
1613 mlx5e_free_cq(cq);
1614 }
1615
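/*
 * Resolve the CPU that services channel @ix's completion vector; it is used
 * for NUMA-local allocations and for the XPS mapping set up in
 * mlx5e_activate_channel().
 */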
1616 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1617 {
1618 return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
1619 }
1620
1621 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1622 struct mlx5e_params *params,
1623 struct mlx5e_channel_param *cparam)
1624 {
1625 int err;
1626 int tc;
1627
1628 for (tc = 0; tc < c->num_tc; tc++) {
1629 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1630 &cparam->tx_cq, &c->sq[tc].cq);
1631 if (err)
1632 goto err_close_tx_cqs;
1633 }
1634
1635 return 0;
1636
1637 err_close_tx_cqs:
1638 for (tc--; tc >= 0; tc--)
1639 mlx5e_close_cq(&c->sq[tc].cq);
1640
1641 return err;
1642 }
1643
1644 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1645 {
1646 int tc;
1647
1648 for (tc = 0; tc < c->num_tc; tc++)
1649 mlx5e_close_cq(&c->sq[tc].cq);
1650 }
1651
1652 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1653 struct mlx5e_params *params,
1654 struct mlx5e_channel_param *cparam)
1655 {
1656 int err;
1657 int tc;
1658
1659 for (tc = 0; tc < params->num_tc; tc++) {
1660 int txq_ix = c->ix + tc * params->num_channels;
1661
1662 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1663 params, &cparam->sq, &c->sq[tc]);
1664 if (err)
1665 goto err_close_sqs;
1666 }
1667
1668 return 0;
1669
1670 err_close_sqs:
1671 for (tc--; tc >= 0; tc--)
1672 mlx5e_close_txqsq(&c->sq[tc]);
1673
1674 return err;
1675 }
1676
1677 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1678 {
1679 int tc;
1680
1681 for (tc = 0; tc < c->num_tc; tc++)
1682 mlx5e_close_txqsq(&c->sq[tc]);
1683 }
1684
1685 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1686 struct mlx5e_txqsq *sq, u32 rate)
1687 {
1688 struct mlx5e_priv *priv = netdev_priv(dev);
1689 struct mlx5_core_dev *mdev = priv->mdev;
1690 struct mlx5e_modify_sq_param msp = {0};
1691 u16 rl_index = 0;
1692 int err;
1693
1694 if (rate == sq->rate_limit)
1695 /* nothing to do */
1696 return 0;
1697
1698 if (sq->rate_limit)
1699 /* remove the current rl index to free space for the next ones */
1700 mlx5_rl_remove_rate(mdev, sq->rate_limit);
1701
1702 sq->rate_limit = 0;
1703
1704 if (rate) {
1705 err = mlx5_rl_add_rate(mdev, rate, &rl_index);
1706 if (err) {
1707 netdev_err(dev, "Failed configuring rate %u: %d\n",
1708 rate, err);
1709 return err;
1710 }
1711 }
1712
1713 msp.curr_state = MLX5_SQC_STATE_RDY;
1714 msp.next_state = MLX5_SQC_STATE_RDY;
1715 msp.rl_index = rl_index;
1716 msp.rl_update = true;
1717 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1718 if (err) {
1719 netdev_err(dev, "Failed configuring rate %u: %d\n",
1720 rate, err);
1721 /* remove the rate from the table */
1722 if (rate)
1723 mlx5_rl_remove_rate(mdev, rate);
1724 return err;
1725 }
1726
1727 sq->rate_limit = rate;
1728 return 0;
1729 }
1730
1731 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1732 {
1733 struct mlx5e_priv *priv = netdev_priv(dev);
1734 struct mlx5_core_dev *mdev = priv->mdev;
1735 struct mlx5e_txqsq *sq = priv->txq2sq[index];
1736 int err = 0;
1737
1738 if (!mlx5_rl_is_supported(mdev)) {
1739 netdev_err(dev, "Rate limiting is not supported on this device\n");
1740 return -EINVAL;
1741 }
1742
1743 /* rate is given in Mb/sec, HW config is in Kb/sec */
1744 rate = rate << 10;
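/* Note: the shift multiplies by 1024, so the Kb/sec value passed to the
 * rate-limit table is a power-of-two approximation of the requested rate.
 */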
1745
1746 /* Check whether the rate is in a valid range; 0 is always valid */
1747 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1748 netdev_err(dev, "TX rate %u, is not in range\n", rate);
1749 return -ERANGE;
1750 }
1751
1752 mutex_lock(&priv->state_lock);
1753 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1754 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1755 if (!err)
1756 priv->tx_rates[index] = rate;
1757 mutex_unlock(&priv->state_lock);
1758
1759 return err;
1760 }
1761
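/*
 * Bring up one channel: CQs first (ICOSQ, per-TC TX, RX and, with XDP, the
 * XDP-SQ CQ), then NAPI, then the ICOSQ/TXQ/XDP SQs and finally the RQ.  The
 * error path unwinds in exactly the reverse order.
 */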
1762 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1763 struct mlx5e_params *params,
1764 struct mlx5e_channel_param *cparam,
1765 struct mlx5e_channel **cp)
1766 {
1767 struct mlx5e_cq_moder icocq_moder = {0, 0};
1768 struct net_device *netdev = priv->netdev;
1769 int cpu = mlx5e_get_cpu(priv, ix);
1770 struct mlx5e_channel *c;
1771 unsigned int irq;
1772 int err;
1773 int eqn;
1774
1775 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1776 if (err)
1777 return err;
1778
1779 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1780 if (!c)
1781 return -ENOMEM;
1782
1783 c->priv = priv;
1784 c->mdev = priv->mdev;
1785 c->tstamp = &priv->tstamp;
1786 c->ix = ix;
1787 c->cpu = cpu;
1788 c->pdev = &priv->mdev->pdev->dev;
1789 c->netdev = priv->netdev;
1790 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1791 c->num_tc = params->num_tc;
1792 c->xdp = !!params->xdp_prog;
1793
1794 c->irq_desc = irq_to_desc(irq);
1795
1796 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1797
1798 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1799 if (err)
1800 goto err_napi_del;
1801
1802 err = mlx5e_open_tx_cqs(c, params, cparam);
1803 if (err)
1804 goto err_close_icosq_cq;
1805
1806 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1807 if (err)
1808 goto err_close_tx_cqs;
1809
1810 /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
1811 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1812 &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1813 if (err)
1814 goto err_close_rx_cq;
1815
1816 napi_enable(&c->napi);
1817
1818 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1819 if (err)
1820 goto err_disable_napi;
1821
1822 err = mlx5e_open_sqs(c, params, cparam);
1823 if (err)
1824 goto err_close_icosq;
1825
1826 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
1827 if (err)
1828 goto err_close_sqs;
1829
1830 err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
1831 if (err)
1832 goto err_close_xdp_sq;
1833
1834 *cp = c;
1835
1836 return 0;
1837 err_close_xdp_sq:
1838 if (c->xdp)
1839 mlx5e_close_xdpsq(&c->rq.xdpsq);
1840
1841 err_close_sqs:
1842 mlx5e_close_sqs(c);
1843
1844 err_close_icosq:
1845 mlx5e_close_icosq(&c->icosq);
1846
1847 err_disable_napi:
1848 napi_disable(&c->napi);
1849 if (c->xdp)
1850 mlx5e_close_cq(&c->rq.xdpsq.cq);
1851
1852 err_close_rx_cq:
1853 mlx5e_close_cq(&c->rq.cq);
1854
1855 err_close_tx_cqs:
1856 mlx5e_close_tx_cqs(c);
1857
1858 err_close_icosq_cq:
1859 mlx5e_close_cq(&c->icosq.cq);
1860
1861 err_napi_del:
1862 netif_napi_del(&c->napi);
1863 kfree(c);
1864
1865 return err;
1866 }
1867
1868 static void mlx5e_activate_channel(struct mlx5e_channel *c)
1869 {
1870 int tc;
1871
1872 for (tc = 0; tc < c->num_tc; tc++)
1873 mlx5e_activate_txqsq(&c->sq[tc]);
1874 mlx5e_activate_rq(&c->rq);
1875 netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1876 }
1877
1878 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1879 {
1880 int tc;
1881
1882 mlx5e_deactivate_rq(&c->rq);
1883 for (tc = 0; tc < c->num_tc; tc++)
1884 mlx5e_deactivate_txqsq(&c->sq[tc]);
1885 }
1886
1887 static void mlx5e_close_channel(struct mlx5e_channel *c)
1888 {
1889 mlx5e_close_rq(&c->rq);
1890 if (c->xdp)
1891 mlx5e_close_xdpsq(&c->rq.xdpsq);
1892 mlx5e_close_sqs(c);
1893 mlx5e_close_icosq(&c->icosq);
1894 napi_disable(&c->napi);
1895 if (c->xdp)
1896 mlx5e_close_cq(&c->rq.xdpsq.cq);
1897 mlx5e_close_cq(&c->rq.cq);
1898 mlx5e_close_tx_cqs(c);
1899 mlx5e_close_cq(&c->icosq.cq);
1900 netif_napi_del(&c->napi);
1901
1902 kfree(c);
1903 }
1904
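/*
 * Note on the striding-RQ fields below: the device interface appears to
 * encode log_wqe_num_of_strides and log_wqe_stride_size as offsets from
 * their minimum supported values (2^9 strides, 2^6-byte stride), hence the
 * "- 9" and "- 6" adjustments.
 */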
1905 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1906 struct mlx5e_params *params,
1907 struct mlx5e_rq_param *param)
1908 {
1909 void *rqc = param->rqc;
1910 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1911
1912 switch (params->rq_wq_type) {
1913 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1914 MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
1915 MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
1916 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1917 break;
1918 default: /* MLX5_WQ_TYPE_LINKED_LIST */
1919 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1920 }
1921
1922 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1923 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1924 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
1925 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
1926 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
1927 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
1928 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
1929
1930 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1931 param->wq.linear = 1;
1932 }
1933
1934 static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
1935 struct mlx5e_rq_param *param)
1936 {
1937 void *rqc = param->rqc;
1938 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1939
1940 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1941 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1942
1943 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1944 }
1945
1946 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1947 struct mlx5e_sq_param *param)
1948 {
1949 void *sqc = param->sqc;
1950 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1951
1952 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1953 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
1954
1955 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1956 }
1957
1958 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1959 struct mlx5e_params *params,
1960 struct mlx5e_sq_param *param)
1961 {
1962 void *sqc = param->sqc;
1963 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1964
1965 mlx5e_build_sq_param_common(priv, param);
1966 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1967 MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
1968 }
1969
1970 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1971 struct mlx5e_cq_param *param)
1972 {
1973 void *cqc = param->cqc;
1974
1975 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
1976 }
1977
1978 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1979 struct mlx5e_params *params,
1980 struct mlx5e_cq_param *param)
1981 {
1982 void *cqc = param->cqc;
1983 u8 log_cq_size;
1984
1985 switch (params->rq_wq_type) {
1986 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1987 log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
1988 break;
1989 default: /* MLX5_WQ_TYPE_LINKED_LIST */
1990 log_cq_size = params->log_rq_size;
1991 }
1992
1993 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
1994 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
1995 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1996 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
1997 }
1998
1999 mlx5e_build_common_cq_param(priv, param);
2000 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2001 }
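/* For a striding RQ every stride may complete with its own CQE, so the RX
 * CQ is sized to log_rq_size + mpwqe_log_num_strides entries.
 * Illustrative example (not the driver defaults): log_rq_size = 6 and
 * mpwqe_log_num_strides = 11 give log_cq_size = 17, i.e. 128K CQEs.
 */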
2002
2003 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2004 struct mlx5e_params *params,
2005 struct mlx5e_cq_param *param)
2006 {
2007 void *cqc = param->cqc;
2008
2009 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2010
2011 mlx5e_build_common_cq_param(priv, param);
2012 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2013 }
2014
2015 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2016 u8 log_wq_size,
2017 struct mlx5e_cq_param *param)
2018 {
2019 void *cqc = param->cqc;
2020
2021 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2022
2023 mlx5e_build_common_cq_param(priv, param);
2024
2025 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2026 }
2027
2028 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2029 u8 log_wq_size,
2030 struct mlx5e_sq_param *param)
2031 {
2032 void *sqc = param->sqc;
2033 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2034
2035 mlx5e_build_sq_param_common(priv, param);
2036
2037 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2038 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2039 }
2040
2041 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2042 struct mlx5e_params *params,
2043 struct mlx5e_sq_param *param)
2044 {
2045 void *sqc = param->sqc;
2046 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2047
2048 mlx5e_build_sq_param_common(priv, param);
2049 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2050 }
2051
2052 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2053 struct mlx5e_params *params,
2054 struct mlx5e_channel_param *cparam)
2055 {
2056 u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2057
2058 mlx5e_build_rq_param(priv, params, &cparam->rq);
2059 mlx5e_build_sq_param(priv, params, &cparam->sq);
2060 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2061 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2062 mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2063 mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2064 mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2065 }
2066
2067 int mlx5e_open_channels(struct mlx5e_priv *priv,
2068 struct mlx5e_channels *chs)
2069 {
2070 struct mlx5e_channel_param *cparam;
2071 int err = -ENOMEM;
2072 int i;
2073
2074 chs->num = chs->params.num_channels;
2075
2076 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2077 cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2078 if (!chs->c || !cparam)
2079 goto err_free;
2080
2081 mlx5e_build_channel_param(priv, &chs->params, cparam);
2082 for (i = 0; i < chs->num; i++) {
2083 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2084 if (err)
2085 goto err_close_channels;
2086 }
2087
2088 kfree(cparam);
2089 return 0;
2090
2091 err_close_channels:
2092 for (i--; i >= 0; i--)
2093 mlx5e_close_channel(chs->c[i]);
2094
2095 err_free:
2096 kfree(chs->c);
2097 kfree(cparam);
2098 chs->num = 0;
2099 return err;
2100 }
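/* On partial failure the channels opened so far are closed in reverse
 * order (err_close_channels) before the channel array and the parameter
 * scratch buffer are freed and chs->num is reset to 0.
 */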
2101
2102 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2103 {
2104 int i;
2105
2106 for (i = 0; i < chs->num; i++)
2107 mlx5e_activate_channel(chs->c[i]);
2108 }
2109
2110 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2111 {
2112 int err = 0;
2113 int i;
2114
2115 for (i = 0; i < chs->num; i++) {
2116 err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
2117 if (err)
2118 break;
2119 }
2120
2121 return err;
2122 }
2123
2124 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2125 {
2126 int i;
2127
2128 for (i = 0; i < chs->num; i++)
2129 mlx5e_deactivate_channel(chs->c[i]);
2130 }
2131
2132 void mlx5e_close_channels(struct mlx5e_channels *chs)
2133 {
2134 int i;
2135
2136 for (i = 0; i < chs->num; i++)
2137 mlx5e_close_channel(chs->c[i]);
2138
2139 kfree(chs->c);
2140 chs->num = 0;
2141 }
2142
2143 static int
2144 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2145 {
2146 struct mlx5_core_dev *mdev = priv->mdev;
2147 void *rqtc;
2148 int inlen;
2149 int err;
2150 u32 *in;
2151 int i;
2152
2153 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2154 in = kvzalloc(inlen, GFP_KERNEL);
2155 if (!in)
2156 return -ENOMEM;
2157
2158 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2159
2160 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2161 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2162
2163 for (i = 0; i < sz; i++)
2164 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2165
2166 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2167 if (!err)
2168 rqt->enabled = true;
2169
2170 kvfree(in);
2171 return err;
2172 }
2173
2174 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2175 {
2176 rqt->enabled = false;
2177 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2178 }
2179
2180 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2181 {
2182 struct mlx5e_rqt *rqt = &priv->indir_rqt;
2183 int err;
2184
2185 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2186 if (err)
2187 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2188 return err;
2189 }
2190
2191 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2192 {
2193 struct mlx5e_rqt *rqt;
2194 int err;
2195 int ix;
2196
2197 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2198 rqt = &priv->direct_tir[ix].rqt;
2199 err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
2200 if (err)
2201 goto err_destroy_rqts;
2202 }
2203
2204 return 0;
2205
2206 err_destroy_rqts:
2207 mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2208 for (ix--; ix >= 0; ix--)
2209 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2210
2211 return err;
2212 }
2213
2214 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2215 {
2216 int i;
2217
2218 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2219 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2220 }
2221
2222 static int mlx5e_rx_hash_fn(int hfunc)
2223 {
2224 return (hfunc == ETH_RSS_HASH_TOP) ?
2225 MLX5_RX_HASH_FN_TOEPLITZ :
2226 MLX5_RX_HASH_FN_INVERTED_XOR8;
2227 }
2228
2229 static int mlx5e_bits_invert(unsigned long a, int size)
2230 {
2231 int inv = 0;
2232 int i;
2233
2234 for (i = 0; i < size; i++)
2235 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2236
2237 return inv;
2238 }
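/* mlx5e_bits_invert() reverses the low 'size' bits of 'a'.
 * Illustrative example: with size = 3, index 1 (0b001) becomes 4 (0b100).
 * It is applied to the indirection table index below when the XOR RSS
 * hash function is selected.
 */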
2239
2240 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2241 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2242 {
2243 int i;
2244
2245 for (i = 0; i < sz; i++) {
2246 u32 rqn;
2247
2248 if (rrp.is_rss) {
2249 int ix = i;
2250
2251 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2252 ix = mlx5e_bits_invert(i, ilog2(sz));
2253
2254 ix = priv->channels.params.indirection_rqt[ix];
2255 rqn = rrp.rss.channels->c[ix]->rq.rqn;
2256 } else {
2257 rqn = rrp.rqn;
2258 }
2259 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2260 }
2261 }
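/* For RSS, entry i of the RQT resolves to a channel RQ through
 * params->indirection_rqt[] (with the index bit-reversed for the XOR
 * hash); for non-RSS redirects (direct/drop) every entry is the single
 * RQN carried in rrp.rqn.
 */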
2262
2263 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2264 struct mlx5e_redirect_rqt_param rrp)
2265 {
2266 struct mlx5_core_dev *mdev = priv->mdev;
2267 void *rqtc;
2268 int inlen;
2269 u32 *in;
2270 int err;
2271
2272 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2273 in = kvzalloc(inlen, GFP_KERNEL);
2274 if (!in)
2275 return -ENOMEM;
2276
2277 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2278
2279 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2280 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2281 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2282 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2283
2284 kvfree(in);
2285 return err;
2286 }
2287
2288 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2289 struct mlx5e_redirect_rqt_param rrp)
2290 {
2291 if (!rrp.is_rss)
2292 return rrp.rqn;
2293
2294 if (ix >= rrp.rss.channels->num)
2295 return priv->drop_rq.rqn;
2296
2297 return rrp.rss.channels->c[ix]->rq.rqn;
2298 }
2299
2300 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2301 struct mlx5e_redirect_rqt_param rrp)
2302 {
2303 u32 rqtn;
2304 int ix;
2305
2306 if (priv->indir_rqt.enabled) {
2307 /* RSS RQ table */
2308 rqtn = priv->indir_rqt.rqtn;
2309 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2310 }
2311
2312 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2313 struct mlx5e_redirect_rqt_param direct_rrp = {
2314 .is_rss = false,
2315 {
2316 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
2317 },
2318 };
2319
2320 /* Direct RQ Tables */
2321 if (!priv->direct_tir[ix].rqt.enabled)
2322 continue;
2323
2324 rqtn = priv->direct_tir[ix].rqt.rqtn;
2325 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2326 }
2327 }
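/* Two kinds of RQ tables are refreshed here: the RSS (indirect) RQT of
 * MLX5E_INDIR_RQT_SIZE entries, and a single-entry direct RQT per channel
 * slot.  A direct slot beyond the currently active channels falls back to
 * the drop RQ (see mlx5e_get_direct_rqn()).
 */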
2328
2329 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2330 struct mlx5e_channels *chs)
2331 {
2332 struct mlx5e_redirect_rqt_param rrp = {
2333 .is_rss = true,
2334 {
2335 .rss = {
2336 .channels = chs,
2337 .hfunc = chs->params.rss_hfunc,
2338 }
2339 },
2340 };
2341
2342 mlx5e_redirect_rqts(priv, rrp);
2343 }
2344
2345 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2346 {
2347 struct mlx5e_redirect_rqt_param drop_rrp = {
2348 .is_rss = false,
2349 {
2350 .rqn = priv->drop_rq.rqn,
2351 },
2352 };
2353
2354 mlx5e_redirect_rqts(priv, drop_rrp);
2355 }
2356
2357 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2358 {
2359 if (!params->lro_en)
2360 return;
2361
2362 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2363
2364 MLX5_SET(tirc, tirc, lro_enable_mask,
2365 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2366 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2367 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2368 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2369 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2370 }
2371
2372 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2373 enum mlx5e_traffic_types tt,
2374 void *tirc, bool inner)
2375 {
2376 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2377 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2378
2379 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2380 MLX5_HASH_FIELD_SEL_DST_IP)
2381
2382 #define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2383 MLX5_HASH_FIELD_SEL_DST_IP |\
2384 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2385 MLX5_HASH_FIELD_SEL_L4_DPORT)
2386
2387 #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2388 MLX5_HASH_FIELD_SEL_DST_IP |\
2389 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2390
2391 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2392 if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2393 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2394 rx_hash_toeplitz_key);
2395 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2396 rx_hash_toeplitz_key);
2397
2398 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2399 memcpy(rss_key, params->toeplitz_hash_key, len);
2400 }
2401
2402 switch (tt) {
2403 case MLX5E_TT_IPV4_TCP:
2404 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2405 MLX5_L3_PROT_TYPE_IPV4);
2406 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2407 MLX5_L4_PROT_TYPE_TCP);
2408 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2409 MLX5_HASH_IP_L4PORTS);
2410 break;
2411
2412 case MLX5E_TT_IPV6_TCP:
2413 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2414 MLX5_L3_PROT_TYPE_IPV6);
2415 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2416 MLX5_L4_PROT_TYPE_TCP);
2417 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2418 MLX5_HASH_IP_L4PORTS);
2419 break;
2420
2421 case MLX5E_TT_IPV4_UDP:
2422 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2423 MLX5_L3_PROT_TYPE_IPV4);
2424 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2425 MLX5_L4_PROT_TYPE_UDP);
2426 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2427 MLX5_HASH_IP_L4PORTS);
2428 break;
2429
2430 case MLX5E_TT_IPV6_UDP:
2431 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2432 MLX5_L3_PROT_TYPE_IPV6);
2433 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2434 MLX5_L4_PROT_TYPE_UDP);
2435 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2436 MLX5_HASH_IP_L4PORTS);
2437 break;
2438
2439 case MLX5E_TT_IPV4_IPSEC_AH:
2440 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2441 MLX5_L3_PROT_TYPE_IPV4);
2442 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2443 MLX5_HASH_IP_IPSEC_SPI);
2444 break;
2445
2446 case MLX5E_TT_IPV6_IPSEC_AH:
2447 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2448 MLX5_L3_PROT_TYPE_IPV6);
2449 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2450 MLX5_HASH_IP_IPSEC_SPI);
2451 break;
2452
2453 case MLX5E_TT_IPV4_IPSEC_ESP:
2454 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2455 MLX5_L3_PROT_TYPE_IPV4);
2456 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2457 MLX5_HASH_IP_IPSEC_SPI);
2458 break;
2459
2460 case MLX5E_TT_IPV6_IPSEC_ESP:
2461 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2462 MLX5_L3_PROT_TYPE_IPV6);
2463 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2464 MLX5_HASH_IP_IPSEC_SPI);
2465 break;
2466
2467 case MLX5E_TT_IPV4:
2468 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2469 MLX5_L3_PROT_TYPE_IPV4);
2470 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2471 MLX5_HASH_IP);
2472 break;
2473
2474 case MLX5E_TT_IPV6:
2475 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2476 MLX5_L3_PROT_TYPE_IPV6);
2477 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2478 MLX5_HASH_IP);
2479 break;
2480 default:
2481 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2482 }
2483 }
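/* Summary of the hash field selection above: plain IPv4/IPv6 traffic
 * hashes on src/dst IP only, TCP/UDP adds the L4 source and destination
 * ports, and IPsec AH/ESP adds the SPI instead of the ports.  The Toeplitz
 * key is programmed (symmetric) only when ETH_RSS_HASH_TOP is in use.
 */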
2484
2485 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2486 {
2487 struct mlx5_core_dev *mdev = priv->mdev;
2488
2489 void *in;
2490 void *tirc;
2491 int inlen;
2492 int err;
2493 int tt;
2494 int ix;
2495
2496 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2497 in = kvzalloc(inlen, GFP_KERNEL);
2498 if (!in)
2499 return -ENOMEM;
2500
2501 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2502 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2503
2504 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2505
2506 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2507 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2508 inlen);
2509 if (err)
2510 goto free_in;
2511 }
2512
2513 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2514 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2515 in, inlen);
2516 if (err)
2517 goto free_in;
2518 }
2519
2520 free_in:
2521 kvfree(in);
2522
2523 return err;
2524 }
2525
2526 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2527 enum mlx5e_traffic_types tt,
2528 u32 *tirc)
2529 {
2530 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2531
2532 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2533
2534 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2535 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2536 MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2537
2538 mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2539 }
2540
2541 static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
2542 {
2543 struct mlx5_core_dev *mdev = priv->mdev;
2544 u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
2545 int err;
2546
2547 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2548 if (err)
2549 return err;
2550
2551 /* Update vport context MTU */
2552 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2553 return 0;
2554 }
2555
2556 static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
2557 {
2558 struct mlx5_core_dev *mdev = priv->mdev;
2559 u16 hw_mtu = 0;
2560 int err;
2561
2562 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2563 if (err || !hw_mtu) /* fallback to port oper mtu */
2564 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2565
2566 *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
2567 }
2568
2569 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2570 {
2571 struct net_device *netdev = priv->netdev;
2572 u16 mtu;
2573 int err;
2574
2575 err = mlx5e_set_mtu(priv, netdev->mtu);
2576 if (err)
2577 return err;
2578
2579 mlx5e_query_mtu(priv, &mtu);
2580 if (mtu != netdev->mtu)
2581 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2582 __func__, mtu, netdev->mtu);
2583
2584 netdev->mtu = mtu;
2585 return 0;
2586 }
2587
2588 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2589 {
2590 struct mlx5e_priv *priv = netdev_priv(netdev);
2591 int nch = priv->channels.params.num_channels;
2592 int ntc = priv->channels.params.num_tc;
2593 int tc;
2594
2595 netdev_reset_tc(netdev);
2596
2597 if (ntc == 1)
2598 return;
2599
2600 netdev_set_num_tc(netdev, ntc);
2601
2602 /* Map netdev TCs to offset 0
2603 * We have our own UP to TXQ mapping for QoS
2604 */
2605 for (tc = 0; tc < ntc; tc++)
2606 netdev_set_tc_queue(netdev, tc, nch, 0);
2607 }
2608
2609 static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
2610 {
2611 struct mlx5e_channel *c;
2612 struct mlx5e_txqsq *sq;
2613 int i, tc;
2614
2615 for (i = 0; i < priv->channels.num; i++)
2616 for (tc = 0; tc < priv->profile->max_tc; tc++)
2617 priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
2618
2619 for (i = 0; i < priv->channels.num; i++) {
2620 c = priv->channels.c[i];
2621 for (tc = 0; tc < c->num_tc; tc++) {
2622 sq = &c->sq[tc];
2623 priv->txq2sq[sq->txq_ix] = sq;
2624 }
2625 }
2626 }
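/* TXQ layout: channel_tc2txq[ch][tc] = ch + tc * num_channels, so all TC0
 * queues come first, then all TC1 queues, and so on.
 * Illustrative example: with 4 channels and 2 TCs, channel 1 / TC 1 maps
 * to txq index 1 + 1 * 4 = 5.
 */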
2627
2628 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2629 {
2630 int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2631 struct net_device *netdev = priv->netdev;
2632
2633 mlx5e_netdev_set_tcs(netdev);
2634 netif_set_real_num_tx_queues(netdev, num_txqs);
2635 netif_set_real_num_rx_queues(netdev, priv->channels.num);
2636
2637 mlx5e_build_channels_tx_maps(priv);
2638 mlx5e_activate_channels(&priv->channels);
2639 netif_tx_start_all_queues(priv->netdev);
2640
2641 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2642 mlx5e_add_sqs_fwd_rules(priv);
2643
2644 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2645 mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2646 }
2647
2648 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2649 {
2650 mlx5e_redirect_rqts_to_drop(priv);
2651
2652 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2653 mlx5e_remove_sqs_fwd_rules(priv);
2654
2655 /* FIXME: This is a workaround for false tx timeout watchdog alarms
2656 * raised while polling inactive tx queues.
2657 */
2658 netif_tx_stop_all_queues(priv->netdev);
2659 netif_tx_disable(priv->netdev);
2660 mlx5e_deactivate_channels(&priv->channels);
2661 }
2662
2663 void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2664 struct mlx5e_channels *new_chs,
2665 mlx5e_fp_hw_modify hw_modify)
2666 {
2667 struct net_device *netdev = priv->netdev;
2668 int new_num_txqs;
2669 int carrier_ok;
2670 new_num_txqs = new_chs->num * new_chs->params.num_tc;
2671
2672 carrier_ok = netif_carrier_ok(netdev);
2673 netif_carrier_off(netdev);
2674
2675 if (new_num_txqs < netdev->real_num_tx_queues)
2676 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2677
2678 mlx5e_deactivate_priv_channels(priv);
2679 mlx5e_close_channels(&priv->channels);
2680
2681 priv->channels = *new_chs;
2682
2683 /* New channels are ready to roll, modify HW settings if needed */
2684 if (hw_modify)
2685 hw_modify(priv);
2686
2687 mlx5e_refresh_tirs(priv, false);
2688 mlx5e_activate_priv_channels(priv);
2689
2690 /* return carrier back if needed */
2691 if (carrier_ok)
2692 netif_carrier_on(netdev);
2693 }
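/* Safe switch sequence: carrier is forced down, the old channels are
 * deactivated and closed, the new set is installed, optional HW state is
 * updated via hw_modify(), TIRs are refreshed, the new channels are
 * activated, and carrier is restored only if it was up before.
 */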
2694
2695 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2696 {
2697 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2698 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2699 }
2700
2701 int mlx5e_open_locked(struct net_device *netdev)
2702 {
2703 struct mlx5e_priv *priv = netdev_priv(netdev);
2704 int err;
2705
2706 set_bit(MLX5E_STATE_OPENED, &priv->state);
2707
2708 err = mlx5e_open_channels(priv, &priv->channels);
2709 if (err)
2710 goto err_clear_state_opened_flag;
2711
2712 mlx5e_refresh_tirs(priv, false);
2713 mlx5e_activate_priv_channels(priv);
2714 if (priv->profile->update_carrier)
2715 priv->profile->update_carrier(priv);
2716
2717 if (priv->profile->update_stats)
2718 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2719
2720 return 0;
2721
2722 err_clear_state_opened_flag:
2723 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2724 return err;
2725 }
2726
2727 int mlx5e_open(struct net_device *netdev)
2728 {
2729 struct mlx5e_priv *priv = netdev_priv(netdev);
2730 int err;
2731
2732 mutex_lock(&priv->state_lock);
2733 err = mlx5e_open_locked(netdev);
2734 if (!err)
2735 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2736 mutex_unlock(&priv->state_lock);
2737
2738 if (mlx5e_vxlan_allowed(priv->mdev))
2739 udp_tunnel_get_rx_info(netdev);
2740
2741 return err;
2742 }
2743
2744 int mlx5e_close_locked(struct net_device *netdev)
2745 {
2746 struct mlx5e_priv *priv = netdev_priv(netdev);
2747
2748 /* May already be CLOSED if a previous configuration operation
2749 * (e.g. an RX/TX queue size change) that involves close & open failed.
2750 */
2751 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2752 return 0;
2753
2754 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2755
2756 netif_carrier_off(priv->netdev);
2757 mlx5e_deactivate_priv_channels(priv);
2758 mlx5e_close_channels(&priv->channels);
2759
2760 return 0;
2761 }
2762
2763 int mlx5e_close(struct net_device *netdev)
2764 {
2765 struct mlx5e_priv *priv = netdev_priv(netdev);
2766 int err;
2767
2768 if (!netif_device_present(netdev))
2769 return -ENODEV;
2770
2771 mutex_lock(&priv->state_lock);
2772 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
2773 err = mlx5e_close_locked(netdev);
2774 mutex_unlock(&priv->state_lock);
2775
2776 return err;
2777 }
2778
2779 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2780 struct mlx5e_rq *rq,
2781 struct mlx5e_rq_param *param)
2782 {
2783 void *rqc = param->rqc;
2784 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2785 int err;
2786
2787 param->wq.db_numa_node = param->wq.buf_numa_node;
2788
2789 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2790 &rq->wq_ctrl);
2791 if (err)
2792 return err;
2793
2794 rq->mdev = mdev;
2795
2796 return 0;
2797 }
2798
2799 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2800 struct mlx5e_cq *cq,
2801 struct mlx5e_cq_param *param)
2802 {
2803 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2804 param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
2805
2806 return mlx5e_alloc_cq_common(mdev, param, cq);
2807 }
2808
2809 static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
2810 struct mlx5e_rq *drop_rq)
2811 {
2812 struct mlx5e_cq_param cq_param = {};
2813 struct mlx5e_rq_param rq_param = {};
2814 struct mlx5e_cq *cq = &drop_rq->cq;
2815 int err;
2816
2817 mlx5e_build_drop_rq_param(mdev, &rq_param);
2818
2819 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2820 if (err)
2821 return err;
2822
2823 err = mlx5e_create_cq(cq, &cq_param);
2824 if (err)
2825 goto err_free_cq;
2826
2827 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
2828 if (err)
2829 goto err_destroy_cq;
2830
2831 err = mlx5e_create_rq(drop_rq, &rq_param);
2832 if (err)
2833 goto err_free_rq;
2834
2835 return 0;
2836
2837 err_free_rq:
2838 mlx5e_free_rq(drop_rq);
2839
2840 err_destroy_cq:
2841 mlx5e_destroy_cq(cq);
2842
2843 err_free_cq:
2844 mlx5e_free_cq(cq);
2845
2846 return err;
2847 }
2848
2849 static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
2850 {
2851 mlx5e_destroy_rq(drop_rq);
2852 mlx5e_free_rq(drop_rq);
2853 mlx5e_destroy_cq(&drop_rq->cq);
2854 mlx5e_free_cq(&drop_rq->cq);
2855 }
2856
2857 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
2858 u32 underlay_qpn, u32 *tisn)
2859 {
2860 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
2861 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2862
2863 MLX5_SET(tisc, tisc, prio, tc << 1);
2864 MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
2865 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
2866
2867 if (mlx5_lag_is_lacp_owner(mdev))
2868 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2869
2870 return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
2871 }
2872
2873 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
2874 {
2875 mlx5_core_destroy_tis(mdev, tisn);
2876 }
2877
2878 int mlx5e_create_tises(struct mlx5e_priv *priv)
2879 {
2880 int err;
2881 int tc;
2882
2883 for (tc = 0; tc < priv->profile->max_tc; tc++) {
2884 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
2885 if (err)
2886 goto err_close_tises;
2887 }
2888
2889 return 0;
2890
2891 err_close_tises:
2892 for (tc--; tc >= 0; tc--)
2893 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2894
2895 return err;
2896 }
2897
2898 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2899 {
2900 int tc;
2901
2902 for (tc = 0; tc < priv->profile->max_tc; tc++)
2903 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2904 }
2905
2906 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
2907 enum mlx5e_traffic_types tt,
2908 u32 *tirc)
2909 {
2910 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2911
2912 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2913
2914 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2915 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2916 mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
2917 }
2918
2919 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
2920 {
2921 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2922
2923 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2924
2925 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2926 MLX5_SET(tirc, tirc, indirect_table, rqtn);
2927 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2928 }
2929
2930 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2931 {
2932 struct mlx5e_tir *tir;
2933 void *tirc;
2934 int inlen;
2935 int i = 0;
2936 int err;
2937 u32 *in;
2938 int tt;
2939
2940 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2941 in = kvzalloc(inlen, GFP_KERNEL);
2942 if (!in)
2943 return -ENOMEM;
2944
2945 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2946 memset(in, 0, inlen);
2947 tir = &priv->indir_tir[tt];
2948 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2949 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
2950 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2951 if (err) {
2952 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
2953 goto err_destroy_inner_tirs;
2954 }
2955 }
2956
2957 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2958 goto out;
2959
2960 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
2961 memset(in, 0, inlen);
2962 tir = &priv->inner_indir_tir[i];
2963 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2964 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
2965 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2966 if (err) {
2967 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
2968 goto err_destroy_inner_tirs;
2969 }
2970 }
2971
2972 out:
2973 kvfree(in);
2974
2975 return 0;
2976
2977 err_destroy_inner_tirs:
2978 for (i--; i >= 0; i--)
2979 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
2980
2981 for (tt--; tt >= 0; tt--)
2982 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2983
2984 kvfree(in);
2985
2986 return err;
2987 }
2988
2989 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
2990 {
2991 int nch = priv->profile->max_nch(priv->mdev);
2992 struct mlx5e_tir *tir;
2993 void *tirc;
2994 int inlen;
2995 int err;
2996 u32 *in;
2997 int ix;
2998
2999 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3000 in = kvzalloc(inlen, GFP_KERNEL);
3001 if (!in)
3002 return -ENOMEM;
3003
3004 for (ix = 0; ix < nch; ix++) {
3005 memset(in, 0, inlen);
3006 tir = &priv->direct_tir[ix];
3007 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3008 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
3009 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3010 if (err)
3011 goto err_destroy_ch_tirs;
3012 }
3013
3014 kvfree(in);
3015
3016 return 0;
3017
3018 err_destroy_ch_tirs:
3019 mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
3020 for (ix--; ix >= 0; ix--)
3021 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
3022
3023 kvfree(in);
3024
3025 return err;
3026 }
3027
3028 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3029 {
3030 int i;
3031
3032 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3033 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3034
3035 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3036 return;
3037
3038 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3039 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3040 }
3041
3042 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3043 {
3044 int nch = priv->profile->max_nch(priv->mdev);
3045 int i;
3046
3047 for (i = 0; i < nch; i++)
3048 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3049 }
3050
3051 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3052 {
3053 int err = 0;
3054 int i;
3055
3056 for (i = 0; i < chs->num; i++) {
3057 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3058 if (err)
3059 return err;
3060 }
3061
3062 return 0;
3063 }
3064
3065 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3066 {
3067 int err = 0;
3068 int i;
3069
3070 for (i = 0; i < chs->num; i++) {
3071 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3072 if (err)
3073 return err;
3074 }
3075
3076 return 0;
3077 }
3078
3079 static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3080 struct tc_mqprio_qopt *mqprio)
3081 {
3082 struct mlx5e_priv *priv = netdev_priv(netdev);
3083 struct mlx5e_channels new_channels = {};
3084 u8 tc = mqprio->num_tc;
3085 int err = 0;
3086
3087 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3088
3089 if (tc && tc != MLX5E_MAX_NUM_TC)
3090 return -EINVAL;
3091
3092 mutex_lock(&priv->state_lock);
3093
3094 new_channels.params = priv->channels.params;
3095 new_channels.params.num_tc = tc ? tc : 1;
3096
3097 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3098 priv->channels.params = new_channels.params;
3099 goto out;
3100 }
3101
3102 err = mlx5e_open_channels(priv, &new_channels);
3103 if (err)
3104 goto out;
3105
3106 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
3107 out:
3108 mutex_unlock(&priv->state_lock);
3109 return err;
3110 }
3111
3112 #ifdef CONFIG_MLX5_ESWITCH
3113 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3114 struct tc_cls_flower_offload *cls_flower)
3115 {
3116 if (cls_flower->common.chain_index)
3117 return -EOPNOTSUPP;
3118
3119 switch (cls_flower->command) {
3120 case TC_CLSFLOWER_REPLACE:
3121 return mlx5e_configure_flower(priv, cls_flower);
3122 case TC_CLSFLOWER_DESTROY:
3123 return mlx5e_delete_flower(priv, cls_flower);
3124 case TC_CLSFLOWER_STATS:
3125 return mlx5e_stats_flower(priv, cls_flower);
3126 default:
3127 return -EOPNOTSUPP;
3128 }
3129 }
3130
3131 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3132 void *cb_priv)
3133 {
3134 struct mlx5e_priv *priv = cb_priv;
3135
3136 if (!tc_can_offload(priv->netdev))
3137 return -EOPNOTSUPP;
3138
3139 switch (type) {
3140 case TC_SETUP_CLSFLOWER:
3141 return mlx5e_setup_tc_cls_flower(priv, type_data);
3142 default:
3143 return -EOPNOTSUPP;
3144 }
3145 }
3146
3147 static int mlx5e_setup_tc_block(struct net_device *dev,
3148 struct tc_block_offload *f)
3149 {
3150 struct mlx5e_priv *priv = netdev_priv(dev);
3151
3152 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3153 return -EOPNOTSUPP;
3154
3155 switch (f->command) {
3156 case TC_BLOCK_BIND:
3157 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3158 priv, priv);
3159 case TC_BLOCK_UNBIND:
3160 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3161 priv);
3162 return 0;
3163 default:
3164 return -EOPNOTSUPP;
3165 }
3166 }
3167 #endif
3168
3169 int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3170 void *type_data)
3171 {
3172 switch (type) {
3173 #ifdef CONFIG_MLX5_ESWITCH
3174 case TC_SETUP_BLOCK:
3175 return mlx5e_setup_tc_block(dev, type_data);
3176 #endif
3177 case TC_SETUP_QDISC_MQPRIO:
3178 return mlx5e_setup_tc_mqprio(dev, type_data);
3179 default:
3180 return -EOPNOTSUPP;
3181 }
3182 }
3183
3184 static void
3185 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3186 {
3187 struct mlx5e_priv *priv = netdev_priv(dev);
3188 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
3189 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3190 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3191
3192 if (mlx5e_is_uplink_rep(priv)) {
3193 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3194 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3195 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3196 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3197 } else {
3198 stats->rx_packets = sstats->rx_packets;
3199 stats->rx_bytes = sstats->rx_bytes;
3200 stats->tx_packets = sstats->tx_packets;
3201 stats->tx_bytes = sstats->tx_bytes;
3202 stats->tx_dropped = sstats->tx_queue_dropped;
3203 }
3204
3205 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3206
3207 stats->rx_length_errors =
3208 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3209 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3210 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3211 stats->rx_crc_errors =
3212 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3213 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3214 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3215 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3216 stats->rx_frame_errors;
3217 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3218
3219 /* vport multicast also counts packets that are dropped due to steering
3220 * or rx out of buffer
3221 */
3222 stats->multicast =
3223 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3224 }
3225
3226 static void mlx5e_set_rx_mode(struct net_device *dev)
3227 {
3228 struct mlx5e_priv *priv = netdev_priv(dev);
3229
3230 queue_work(priv->wq, &priv->set_rx_mode_work);
3231 }
3232
3233 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3234 {
3235 struct mlx5e_priv *priv = netdev_priv(netdev);
3236 struct sockaddr *saddr = addr;
3237
3238 if (!is_valid_ether_addr(saddr->sa_data))
3239 return -EADDRNOTAVAIL;
3240
3241 netif_addr_lock_bh(netdev);
3242 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3243 netif_addr_unlock_bh(netdev);
3244
3245 queue_work(priv->wq, &priv->set_rx_mode_work);
3246
3247 return 0;
3248 }
3249
3250 #define MLX5E_SET_FEATURE(features, feature, enable) \
3251 do { \
3252 if (enable) \
3253 *features |= feature; \
3254 else \
3255 *features &= ~feature; \
3256 } while (0)
3257
3258 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3259
3260 static int set_feature_lro(struct net_device *netdev, bool enable)
3261 {
3262 struct mlx5e_priv *priv = netdev_priv(netdev);
3263 struct mlx5e_channels new_channels = {};
3264 int err = 0;
3265 bool reset;
3266
3267 mutex_lock(&priv->state_lock);
3268
3269 reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
3270 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3271
3272 new_channels.params = priv->channels.params;
3273 new_channels.params.lro_en = enable;
3274
3275 if (!reset) {
3276 priv->channels.params = new_channels.params;
3277 err = mlx5e_modify_tirs_lro(priv);
3278 goto out;
3279 }
3280
3281 err = mlx5e_open_channels(priv, &new_channels);
3282 if (err)
3283 goto out;
3284
3285 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3286 out:
3287 mutex_unlock(&priv->state_lock);
3288 return err;
3289 }
3290
3291 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3292 {
3293 struct mlx5e_priv *priv = netdev_priv(netdev);
3294
3295 if (enable)
3296 mlx5e_enable_cvlan_filter(priv);
3297 else
3298 mlx5e_disable_cvlan_filter(priv);
3299
3300 return 0;
3301 }
3302
3303 #ifdef CONFIG_MLX5_ESWITCH
3304 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3305 {
3306 struct mlx5e_priv *priv = netdev_priv(netdev);
3307
3308 if (!enable && mlx5e_tc_num_filters(priv)) {
3309 netdev_err(netdev,
3310 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3311 return -EINVAL;
3312 }
3313
3314 return 0;
3315 }
3316 #endif
3317
3318 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3319 {
3320 struct mlx5e_priv *priv = netdev_priv(netdev);
3321 struct mlx5_core_dev *mdev = priv->mdev;
3322
3323 return mlx5_set_port_fcs(mdev, !enable);
3324 }
3325
3326 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3327 {
3328 struct mlx5e_priv *priv = netdev_priv(netdev);
3329 int err;
3330
3331 mutex_lock(&priv->state_lock);
3332
3333 priv->channels.params.scatter_fcs_en = enable;
3334 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3335 if (err)
3336 priv->channels.params.scatter_fcs_en = !enable;
3337
3338 mutex_unlock(&priv->state_lock);
3339
3340 return err;
3341 }
3342
3343 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3344 {
3345 struct mlx5e_priv *priv = netdev_priv(netdev);
3346 int err = 0;
3347
3348 mutex_lock(&priv->state_lock);
3349
3350 priv->channels.params.vlan_strip_disable = !enable;
3351 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3352 goto unlock;
3353
3354 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3355 if (err)
3356 priv->channels.params.vlan_strip_disable = enable;
3357
3358 unlock:
3359 mutex_unlock(&priv->state_lock);
3360
3361 return err;
3362 }
3363
3364 #ifdef CONFIG_RFS_ACCEL
3365 static int set_feature_arfs(struct net_device *netdev, bool enable)
3366 {
3367 struct mlx5e_priv *priv = netdev_priv(netdev);
3368 int err;
3369
3370 if (enable)
3371 err = mlx5e_arfs_enable(priv);
3372 else
3373 err = mlx5e_arfs_disable(priv);
3374
3375 return err;
3376 }
3377 #endif
3378
3379 static int mlx5e_handle_feature(struct net_device *netdev,
3380 netdev_features_t *features,
3381 netdev_features_t wanted_features,
3382 netdev_features_t feature,
3383 mlx5e_feature_handler feature_handler)
3384 {
3385 netdev_features_t changes = wanted_features ^ netdev->features;
3386 bool enable = !!(wanted_features & feature);
3387 int err;
3388
3389 if (!(changes & feature))
3390 return 0;
3391
3392 err = feature_handler(netdev, enable);
3393 if (err) {
3394 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3395 enable ? "Enable" : "Disable", &feature, err);
3396 return err;
3397 }
3398
3399 MLX5E_SET_FEATURE(features, feature, enable);
3400 return 0;
3401 }
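/* mlx5e_handle_feature() is a no-op when the requested bit did not change;
 * on handler failure it leaves *features untouched for that bit and
 * propagates the error, so mlx5e_set_features() below can roll
 * netdev->features back to the subset that was actually applied.
 */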
3402
3403 static int mlx5e_set_features(struct net_device *netdev,
3404 netdev_features_t features)
3405 {
3406 netdev_features_t oper_features = netdev->features;
3407 int err;
3408
3409 err = mlx5e_handle_feature(netdev, &oper_features, features,
3410 NETIF_F_LRO, set_feature_lro);
3411 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3412 NETIF_F_HW_VLAN_CTAG_FILTER,
3413 set_feature_cvlan_filter);
3414 #ifdef CONFIG_MLX5_ESWITCH
3415 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3416 NETIF_F_HW_TC, set_feature_tc_num_filters);
3417 #endif
3418 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3419 NETIF_F_RXALL, set_feature_rx_all);
3420 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3421 NETIF_F_RXFCS, set_feature_rx_fcs);
3422 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3423 NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3424 #ifdef CONFIG_RFS_ACCEL
3425 err |= mlx5e_handle_feature(netdev, &oper_features, features,
3426 NETIF_F_NTUPLE, set_feature_arfs);
3427 #endif
3428
3429 if (err) {
3430 netdev->features = oper_features;
3431 return -EINVAL;
3432 }
3433
3434 return 0;
3435 }
3436
3437 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3438 netdev_features_t features)
3439 {
3440 struct mlx5e_priv *priv = netdev_priv(netdev);
3441
3442 mutex_lock(&priv->state_lock);
3443 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3444 /* HW strips the outer C-tag header; this is a problem
3445 * for S-tag traffic.
3446 */
3447 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3448 if (!priv->channels.params.vlan_strip_disable)
3449 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3450 }
3451 mutex_unlock(&priv->state_lock);
3452
3453 return features;
3454 }
3455
3456 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
3457 {
3458 struct mlx5e_priv *priv = netdev_priv(netdev);
3459 struct mlx5e_channels new_channels = {};
3460 int curr_mtu;
3461 int err = 0;
3462 bool reset;
3463
3464 mutex_lock(&priv->state_lock);
3465
3466 reset = !priv->channels.params.lro_en &&
3467 (priv->channels.params.rq_wq_type !=
3468 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
3469
3470 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3471
3472 curr_mtu = netdev->mtu;
3473 netdev->mtu = new_mtu;
3474
3475 if (!reset) {
3476 mlx5e_set_dev_port_mtu(priv);
3477 goto out;
3478 }
3479
3480 new_channels.params = priv->channels.params;
3481 err = mlx5e_open_channels(priv, &new_channels);
3482 if (err) {
3483 netdev->mtu = curr_mtu;
3484 goto out;
3485 }
3486
3487 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
3488
3489 out:
3490 mutex_unlock(&priv->state_lock);
3491 return err;
3492 }
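/* A full channel reset on MTU change is needed only when the interface is
 * open, LRO is disabled and the RQ is the legacy linked-list type (the RX
 * buffer sizing depends on the MTU in that mode); otherwise only the port
 * MTU is reprogrammed.
 */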
3493
3494 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3495 {
3496 struct hwtstamp_config config;
3497 int err;
3498
3499 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3500 return -EOPNOTSUPP;
3501
3502 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3503 return -EFAULT;
3504
3505 /* TX HW timestamp */
3506 switch (config.tx_type) {
3507 case HWTSTAMP_TX_OFF:
3508 case HWTSTAMP_TX_ON:
3509 break;
3510 default:
3511 return -ERANGE;
3512 }
3513
3514 mutex_lock(&priv->state_lock);
3515 /* RX HW timestamp */
3516 switch (config.rx_filter) {
3517 case HWTSTAMP_FILTER_NONE:
3518 /* Reset CQE compression to Admin default */
3519 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3520 break;
3521 case HWTSTAMP_FILTER_ALL:
3522 case HWTSTAMP_FILTER_SOME:
3523 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3524 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3525 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3526 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3527 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3528 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3529 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3530 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3531 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3532 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3533 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3534 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3535 case HWTSTAMP_FILTER_NTP_ALL:
3536 /* Disable CQE compression */
3537 netdev_warn(priv->netdev, "Disabling cqe compression\n");
3538 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3539 if (err) {
3540 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3541 mutex_unlock(&priv->state_lock);
3542 return err;
3543 }
3544 config.rx_filter = HWTSTAMP_FILTER_ALL;
3545 break;
3546 default:
3547 mutex_unlock(&priv->state_lock);
3548 return -ERANGE;
3549 }
3550
3551 memcpy(&priv->tstamp, &config, sizeof(config));
3552 mutex_unlock(&priv->state_lock);
3553
3554 return copy_to_user(ifr->ifr_data, &config,
3555 sizeof(config)) ? -EFAULT : 0;
3556 }
3557
3558 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3559 {
3560 struct hwtstamp_config *cfg = &priv->tstamp;
3561
3562 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3563 return -EOPNOTSUPP;
3564
3565 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3566 }
3567
3568 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3569 {
3570 struct mlx5e_priv *priv = netdev_priv(dev);
3571
3572 switch (cmd) {
3573 case SIOCSHWTSTAMP:
3574 return mlx5e_hwstamp_set(priv, ifr);
3575 case SIOCGHWTSTAMP:
3576 return mlx5e_hwstamp_get(priv, ifr);
3577 default:
3578 return -EOPNOTSUPP;
3579 }
3580 }
3581
3582 #ifdef CONFIG_MLX5_ESWITCH
3583 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3584 {
3585 struct mlx5e_priv *priv = netdev_priv(dev);
3586 struct mlx5_core_dev *mdev = priv->mdev;
3587
3588 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3589 }
3590
3591 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3592 __be16 vlan_proto)
3593 {
3594 struct mlx5e_priv *priv = netdev_priv(dev);
3595 struct mlx5_core_dev *mdev = priv->mdev;
3596
3597 if (vlan_proto != htons(ETH_P_8021Q))
3598 return -EPROTONOSUPPORT;
3599
3600 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3601 vlan, qos);
3602 }
3603
3604 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3605 {
3606 struct mlx5e_priv *priv = netdev_priv(dev);
3607 struct mlx5_core_dev *mdev = priv->mdev;
3608
3609 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3610 }
3611
3612 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3613 {
3614 struct mlx5e_priv *priv = netdev_priv(dev);
3615 struct mlx5_core_dev *mdev = priv->mdev;
3616
3617 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3618 }
3619
3620 static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3621 int max_tx_rate)
3622 {
3623 struct mlx5e_priv *priv = netdev_priv(dev);
3624 struct mlx5_core_dev *mdev = priv->mdev;
3625
3626 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
3627 max_tx_rate, min_tx_rate);
3628 }
3629
3630 static int mlx5_vport_link2ifla(u8 esw_link)
3631 {
3632 switch (esw_link) {
3633 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3634 return IFLA_VF_LINK_STATE_DISABLE;
3635 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3636 return IFLA_VF_LINK_STATE_ENABLE;
3637 }
3638 return IFLA_VF_LINK_STATE_AUTO;
3639 }
3640
3641 static int mlx5_ifla_link2vport(u8 ifla_link)
3642 {
3643 switch (ifla_link) {
3644 case IFLA_VF_LINK_STATE_DISABLE:
3645 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3646 case IFLA_VF_LINK_STATE_ENABLE:
3647 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3648 }
3649 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3650 }
3651
3652 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3653 int link_state)
3654 {
3655 struct mlx5e_priv *priv = netdev_priv(dev);
3656 struct mlx5_core_dev *mdev = priv->mdev;
3657
3658 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3659 mlx5_ifla_link2vport(link_state));
3660 }
3661
3662 static int mlx5e_get_vf_config(struct net_device *dev,
3663 int vf, struct ifla_vf_info *ivi)
3664 {
3665 struct mlx5e_priv *priv = netdev_priv(dev);
3666 struct mlx5_core_dev *mdev = priv->mdev;
3667 int err;
3668
3669 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3670 if (err)
3671 return err;
3672 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3673 return 0;
3674 }
3675
3676 static int mlx5e_get_vf_stats(struct net_device *dev,
3677 int vf, struct ifla_vf_stats *vf_stats)
3678 {
3679 struct mlx5e_priv *priv = netdev_priv(dev);
3680 struct mlx5_core_dev *mdev = priv->mdev;
3681
3682 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3683 vf_stats);
3684 }
3685 #endif
3686
3687 static void mlx5e_add_vxlan_port(struct net_device *netdev,
3688 struct udp_tunnel_info *ti)
3689 {
3690 struct mlx5e_priv *priv = netdev_priv(netdev);
3691
3692 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3693 return;
3694
3695 if (!mlx5e_vxlan_allowed(priv->mdev))
3696 return;
3697
3698 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3699 }
3700
3701 static void mlx5e_del_vxlan_port(struct net_device *netdev,
3702 struct udp_tunnel_info *ti)
3703 {
3704 struct mlx5e_priv *priv = netdev_priv(netdev);
3705
3706 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3707 return;
3708
3709 if (!mlx5e_vxlan_allowed(priv->mdev))
3710 return;
3711
3712 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
3713 }
3714
3715 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3716 struct sk_buff *skb,
3717 netdev_features_t features)
3718 {
3719 unsigned int offset = 0;
3720 struct udphdr *udph;
3721 u8 proto;
3722 u16 port;
3723
3724 switch (vlan_get_protocol(skb)) {
3725 case htons(ETH_P_IP):
3726 proto = ip_hdr(skb)->protocol;
3727 break;
3728 case htons(ETH_P_IPV6):
3729 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
3730 break;
3731 default:
3732 goto out;
3733 }
3734
3735 switch (proto) {
3736 case IPPROTO_GRE:
3737 return features;
3738 case IPPROTO_UDP:
3739 udph = udp_hdr(skb);
3740 port = be16_to_cpu(udph->dest);
3741
3742 /* Verify if UDP port is being offloaded by HW */
3743 if (mlx5e_vxlan_lookup_port(priv, port))
3744 return features;
3745 }
3746
3747 out:
3748 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
3749 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3750 }
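/* Per-skb offload policy for encapsulated traffic: GRE keeps the requested
 * checksum/GSO features unconditionally, UDP tunnels keep them only when
 * the destination port is a VXLAN port known to the HW, and anything else
 * has NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK stripped for this packet.
 */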
3751
3752 static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3753 struct net_device *netdev,
3754 netdev_features_t features)
3755 {
3756 struct mlx5e_priv *priv = netdev_priv(netdev);
3757
3758 features = vlan_features_check(skb, features);
3759 features = vxlan_features_check(skb, features);
3760
3761 #ifdef CONFIG_MLX5_EN_IPSEC
3762 if (mlx5e_ipsec_feature_check(skb, netdev, features))
3763 return features;
3764 #endif
3765
3766 /* Validate if the tunneled packet is being offloaded by HW */
3767 if (skb->encapsulation &&
3768 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
3769 return mlx5e_tunnel_features_check(priv, skb, features);
3770
3771 return features;
3772 }
3773
3774 static void mlx5e_tx_timeout(struct net_device *dev)
3775 {
3776 struct mlx5e_priv *priv = netdev_priv(dev);
3777 bool sched_work = false;
3778 int i;
3779
3780 netdev_err(dev, "TX timeout detected\n");
3781
3782 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
3783 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3784
3785 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
3786 continue;
3787 sched_work = true;
3788 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
3789 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
3790 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
3791 }
3792
3793 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
3794 schedule_work(&priv->tx_timeout_work);
3795 }
3796
3797 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3798 {
3799 struct mlx5e_priv *priv = netdev_priv(netdev);
3800 struct bpf_prog *old_prog;
3801 int err = 0;
3802 bool reset, was_opened;
3803 int i;
3804
3805 mutex_lock(&priv->state_lock);
3806
3807 if ((netdev->features & NETIF_F_LRO) && prog) {
3808 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3809 err = -EINVAL;
3810 goto unlock;
3811 }
3812
3813 if ((netdev->features & NETIF_F_HW_ESP) && prog) {
3814 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
3815 err = -EINVAL;
3816 goto unlock;
3817 }
3818
3819 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3820 /* no need for full reset when exchanging programs */
3821 reset = (!priv->channels.params.xdp_prog || !prog);
3822
3823 if (was_opened && reset)
3824 mlx5e_close_locked(netdev);
3825 if (was_opened && !reset) {
3826 /* num_channels is invariant here, so we can take the
3827 * batched reference right upfront.
3828 */
3829 prog = bpf_prog_add(prog, priv->channels.num);
3830 if (IS_ERR(prog)) {
3831 err = PTR_ERR(prog);
3832 goto unlock;
3833 }
3834 }
3835
3836 /* Exchange programs; the program reference we got from the caller is
3837 * kept as long as we don't fail from this point onwards.
3838 */
3839 old_prog = xchg(&priv->channels.params.xdp_prog, prog);
3840 if (old_prog)
3841 bpf_prog_put(old_prog);
3842
3843 if (reset) /* change RQ type according to priv->xdp_prog */
3844 mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
3845
3846 if (was_opened && reset)
3847 mlx5e_open_locked(netdev);
3848
3849 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3850 goto unlock;
3851
3852 /* Exchanging programs without a reset; we update the ref counts on
3853 * behalf of the channels' RQs here.
3854 */
3855 for (i = 0; i < priv->channels.num; i++) {
3856 struct mlx5e_channel *c = priv->channels.c[i];
3857
3858 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3859 napi_synchronize(&c->napi);
3860 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3861
3862 old_prog = xchg(&c->rq.xdp_prog, prog);
3863
3864 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3865 /* napi_schedule in case we have missed anything */
3866 napi_schedule(&c->napi);
3867
3868 if (old_prog)
3869 bpf_prog_put(old_prog);
3870 }
3871
3872 unlock:
3873 mutex_unlock(&priv->state_lock);
3874 return err;
3875 }
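/* XDP attach policy: a full close/open cycle is required only when going
 * from no program to a program or back (the RQ type is re-evaluated);
 * swapping one program for another just exchanges rq->xdp_prog per channel
 * with the RQ temporarily disabled and NAPI synchronized.
 */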
3876
3877 static u32 mlx5e_xdp_query(struct net_device *dev)
3878 {
3879 struct mlx5e_priv *priv = netdev_priv(dev);
3880 const struct bpf_prog *xdp_prog;
3881 u32 prog_id = 0;
3882
3883 mutex_lock(&priv->state_lock);
3884 xdp_prog = priv->channels.params.xdp_prog;
3885 if (xdp_prog)
3886 prog_id = xdp_prog->aux->id;
3887 mutex_unlock(&priv->state_lock);
3888
3889 return prog_id;
3890 }
3891
3892 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3893 {
3894 switch (xdp->command) {
3895 case XDP_SETUP_PROG:
3896 return mlx5e_xdp_set(dev, xdp->prog);
3897 case XDP_QUERY_PROG:
3898 xdp->prog_id = mlx5e_xdp_query(dev);
3899 xdp->prog_attached = !!xdp->prog_id;
3900 return 0;
3901 default:
3902 return -EINVAL;
3903 }
3904 }
3905
3906 #ifdef CONFIG_NET_POLL_CONTROLLER
3907 /* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs
3908 * without re-enabling interrupts.
3909 */
3910 static void mlx5e_netpoll(struct net_device *dev)
3911 {
3912 struct mlx5e_priv *priv = netdev_priv(dev);
3913 struct mlx5e_channels *chs = &priv->channels;
3914
3915 int i;
3916
3917 for (i = 0; i < chs->num; i++)
3918 napi_schedule(&chs->c[i]->napi);
3919 }
3920 #endif
3921
3922 static const struct net_device_ops mlx5e_netdev_ops = {
3923 .ndo_open = mlx5e_open,
3924 .ndo_stop = mlx5e_close,
3925 .ndo_start_xmit = mlx5e_xmit,
3926 .ndo_setup_tc = mlx5e_setup_tc,
3927 .ndo_select_queue = mlx5e_select_queue,
3928 .ndo_get_stats64 = mlx5e_get_stats,
3929 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3930 .ndo_set_mac_address = mlx5e_set_mac,
3931 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3932 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
3933 .ndo_set_features = mlx5e_set_features,
3934 .ndo_fix_features = mlx5e_fix_features,
3935 .ndo_change_mtu = mlx5e_change_mtu,
3936 .ndo_do_ioctl = mlx5e_ioctl,
3937 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
3938 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
3939 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
3940 .ndo_features_check = mlx5e_features_check,
3941 #ifdef CONFIG_RFS_ACCEL
3942 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3943 #endif
3944 .ndo_tx_timeout = mlx5e_tx_timeout,
3945 .ndo_bpf = mlx5e_xdp,
3946 #ifdef CONFIG_NET_POLL_CONTROLLER
3947 .ndo_poll_controller = mlx5e_netpoll,
3948 #endif
3949 #ifdef CONFIG_MLX5_ESWITCH
3950 /* SRIOV E-Switch NDOs */
3951 .ndo_set_vf_mac = mlx5e_set_vf_mac,
3952 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
3953 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
3954 .ndo_set_vf_trust = mlx5e_set_vf_trust,
3955 .ndo_set_vf_rate = mlx5e_set_vf_rate,
3956 .ndo_get_vf_config = mlx5e_get_vf_config,
3957 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
3958 .ndo_get_vf_stats = mlx5e_get_vf_stats,
3959 .ndo_has_offload_stats = mlx5e_has_offload_stats,
3960 .ndo_get_offload_stats = mlx5e_get_offload_stats,
3961 #endif
3962 };
3963
3964 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3965 {
3966 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3967 return -EOPNOTSUPP;
3968 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3969 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3970 !MLX5_CAP_ETH(mdev, csum_cap) ||
3971 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3972 !MLX5_CAP_ETH(mdev, vlan_cap) ||
3973 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3974 MLX5_CAP_FLOWTABLE(mdev,
3975 flow_table_properties_nic_receive.max_ft_level)
3976 < 3) {
3977 mlx5_core_warn(mdev,
3978 "Not creating net device, some required device capabilities are missing\n");
3979 return -EOPNOTSUPP;
3980 }
3981 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3982 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
3983 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3984 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
3985
3986 return 0;
3987 }
3988
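/* TX inline budget derived from the BlueFlame (bf) register: half of
 * the BF register size, minus sizeof(struct mlx5e_tx_wqe), plus the two
 * bytes of inline_hdr_start noted below. E.g. log_bf_reg_size == 9
 * gives a 256-byte starting budget.
 */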
3989 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3990 {
3991 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3992
3993 return bf_buf_size -
3994 sizeof(struct mlx5e_tx_wqe) +
3995 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3996 }
3997
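/* Fill the RSS indirection table by spreading entries round-robin
 * across the available channels.
 */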
3998 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
3999 int num_channels)
4000 {
4001 int i;
4002
4003 for (i = 0; i < len; i++)
4004 indirection_rqt[i] = i % num_channels;
4005 }
4006
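/* Estimate raw PCI bandwidth in Mb/s as the per-lane transfer rate
 * times the negotiated link width, ignoring encoding overhead;
 * e.g. a Gen3 (8 GT/s) x8 link yields 8000 * 8 = 64000.
 */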
4007 static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
4008 {
4009 enum pcie_link_width width;
4010 enum pci_bus_speed speed;
4011 int err = 0;
4012
4013 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
4014 if (err)
4015 return err;
4016
4017 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
4018 return -EINVAL;
4019
4020 switch (speed) {
4021 case PCIE_SPEED_2_5GT:
4022 *pci_bw = 2500 * width;
4023 break;
4024 case PCIE_SPEED_5_0GT:
4025 *pci_bw = 5000 * width;
4026 break;
4027 case PCIE_SPEED_8_0GT:
4028 *pci_bw = 8000 * width;
4029 break;
4030 default:
4031 return -EINVAL;
4032 }
4033
4034 return 0;
4035 }
4036
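/* Default-enable CQE compression only when the PCI link is both below
 * 40 Gb/s and slower than the port, i.e. when PCIe is likely the
 * bottleneck.
 */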
4037 static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
4038 {
4039 return (link_speed && pci_bw &&
4040 (pci_bw < 40000) && (pci_bw < link_speed));
4041 }
4042
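/* Keep HW LRO on by default (including when the speeds are unknown)
 * unless the PCI link is both at or below 16 Gb/s and slower than the
 * port.
 */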
4043 static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
4044 {
4045 return !(link_speed && pci_bw &&
4046 (pci_bw <= 16000) && (pci_bw < link_speed));
4047 }
4048
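/* CQ moderation defaults: TX and RX start from the
 * MLX5E_PARAMS_DEFAULT_*_CQ_MODERATION_* constants, use the *_FROM_CQE
 * usec value when the period timer is armed from the last CQE instead
 * of the last EQE, and RX switches to the adaptive-moderation profile
 * when rx_am is enabled.
 */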
4049 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4050 {
4051 params->tx_cq_moderation.cq_period_mode = cq_period_mode;
4052
4053 params->tx_cq_moderation.pkts =
4054 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4055 params->tx_cq_moderation.usec =
4056 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4057
4058 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4059 params->tx_cq_moderation.usec =
4060 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4061
4062 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4063 params->tx_cq_moderation.cq_period_mode ==
4064 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4065 }
4066
4067 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4068 {
4069 params->rx_cq_moderation.cq_period_mode = cq_period_mode;
4070
4071 params->rx_cq_moderation.pkts =
4072 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4073 params->rx_cq_moderation.usec =
4074 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
4075
4076 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4077 params->rx_cq_moderation.usec =
4078 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4079
4080 if (params->rx_am_enabled)
4081 params->rx_cq_moderation =
4082 mlx5e_am_get_def_profile(cq_period_mode);
4083
4084 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4085 params->rx_cq_moderation.cq_period_mode ==
4086 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4087 }
4088
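/* Return the smallest supported LRO timer period that covers
 * wanted_timeout, or the largest supported period if none does.
 */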
4089 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4090 {
4091 int i;
4092
4093 /* The supported periods are organized in ascending order */
4094 for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4095 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4096 break;
4097
4098 return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4099 }
4100
4101 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4102 struct mlx5e_params *params,
4103 u16 max_channels)
4104 {
4105 u8 rx_cq_period_mode;
4106 u32 link_speed = 0;
4107 u32 pci_bw = 0;
4108
4109 params->num_channels = max_channels;
4110 params->num_tc = 1;
4111
4112 mlx5e_get_max_linkspeed(mdev, &link_speed);
4113 mlx5e_get_pci_bw(mdev, &pci_bw);
4114 mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
4115 link_speed, pci_bw);
4116
4117 /* SQ */
4118 params->log_sq_size = is_kdump_kernel() ?
4119 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4120 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4121
4122 /* set CQE compression */
4123 params->rx_cqe_compress_def = false;
4124 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4125 MLX5_CAP_GEN(mdev, vport_group_manager))
4126 params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
4127
4128 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4129 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
4130
4131 /* RQ */
4132 mlx5e_set_rq_params(mdev, params);
4133
4134 /* HW LRO */
4135
4136 /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
4137 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
4138 params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
4139 params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4140
4141 /* CQ moderation params */
4142 rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4143 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4144 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4145 params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4146 mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4147 mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
4148
4149 /* TX inline */
4150 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
4151 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
4152
4153 /* RSS */
4154 params->rss_hfunc = ETH_RSS_HASH_XOR;
4155 netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
4156 mlx5e_build_default_indir_rqt(params->indirection_rqt,
4157 MLX5E_INDIR_RQT_SIZE, max_channels);
4158 }
4159
4160 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4161 struct net_device *netdev,
4162 const struct mlx5e_profile *profile,
4163 void *ppriv)
4164 {
4165 struct mlx5e_priv *priv = netdev_priv(netdev);
4166
4167 priv->mdev = mdev;
4168 priv->netdev = netdev;
4169 priv->profile = profile;
4170 priv->ppriv = ppriv;
4171 priv->msglevel = MLX5E_MSG_LEVEL;
4172 priv->hard_mtu = MLX5E_ETH_HARD_MTU;
4173
4174 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
4175
4176 mutex_init(&priv->state_lock);
4177
4178 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4179 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4180 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4181 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4182
4183 mlx5e_timestamp_init(priv);
4184 }
4185
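/* Use the MAC address reported by firmware; if it is all-zero and this
 * function is not the vport group manager (presumably a VF that has not
 * been assigned an address), fall back to a random MAC so the netdev is
 * still usable.
 */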
4186 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4187 {
4188 struct mlx5e_priv *priv = netdev_priv(netdev);
4189
4190 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
4191 if (is_zero_ether_addr(netdev->dev_addr) &&
4192 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4193 eth_hw_addr_random(netdev);
4194 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4195 }
4196 }
4197
4198 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4199 static const struct switchdev_ops mlx5e_switchdev_ops = {
4200 .switchdev_port_attr_get = mlx5e_attr_get,
4201 };
4202 #endif
4203
4204 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4205 {
4206 struct mlx5e_priv *priv = netdev_priv(netdev);
4207 struct mlx5_core_dev *mdev = priv->mdev;
4208 bool fcs_supported;
4209 bool fcs_enabled;
4210
4211 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4212
4213 netdev->netdev_ops = &mlx5e_netdev_ops;
4214
4215 #ifdef CONFIG_MLX5_CORE_EN_DCB
4216 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4217 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
4218 #endif
4219
4220 netdev->watchdog_timeo = 15 * HZ;
4221
4222 netdev->ethtool_ops = &mlx5e_ethtool_ops;
4223
4224 netdev->vlan_features |= NETIF_F_SG;
4225 netdev->vlan_features |= NETIF_F_IP_CSUM;
4226 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
4227 netdev->vlan_features |= NETIF_F_GRO;
4228 netdev->vlan_features |= NETIF_F_TSO;
4229 netdev->vlan_features |= NETIF_F_TSO6;
4230 netdev->vlan_features |= NETIF_F_RXCSUM;
4231 netdev->vlan_features |= NETIF_F_RXHASH;
4232
4233 if (MLX5_CAP_ETH(mdev, lro_cap))
4234 netdev->vlan_features |= NETIF_F_LRO;
4235
4236 netdev->hw_features = netdev->vlan_features;
4237 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4238 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4239 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4240 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4241
4242 if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4243 netdev->hw_features |= NETIF_F_GSO_PARTIAL;
4244 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
4245 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
4246 netdev->hw_enc_features |= NETIF_F_TSO;
4247 netdev->hw_enc_features |= NETIF_F_TSO6;
4248 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4249 }
4250
4251 if (mlx5e_vxlan_allowed(mdev)) {
4252 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
4253 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4254 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4255 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4256 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4257 }
4258
4259 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4260 netdev->hw_features |= NETIF_F_GSO_GRE |
4261 NETIF_F_GSO_GRE_CSUM;
4262 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4263 NETIF_F_GSO_GRE_CSUM;
4264 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4265 NETIF_F_GSO_GRE_CSUM;
4266 }
4267
4268 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4269
4270 if (fcs_supported)
4271 netdev->hw_features |= NETIF_F_RXALL;
4272
4273 if (MLX5_CAP_ETH(mdev, scatter_fcs))
4274 netdev->hw_features |= NETIF_F_RXFCS;
4275
4276 netdev->features = netdev->hw_features;
4277 if (!priv->channels.params.lro_en)
4278 netdev->features &= ~NETIF_F_LRO;
4279
4280 if (fcs_enabled)
4281 netdev->features &= ~NETIF_F_RXALL;
4282
4283 if (!priv->channels.params.scatter_fcs_en)
4284 netdev->features &= ~NETIF_F_RXFCS;
4285
4286 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4287 if (FT_CAP(flow_modify_en) &&
4288 FT_CAP(modify_root) &&
4289 FT_CAP(identified_miss_table_mode) &&
4290 FT_CAP(flow_table_modify)) {
4291 netdev->hw_features |= NETIF_F_HW_TC;
4292 #ifdef CONFIG_RFS_ACCEL
4293 netdev->hw_features |= NETIF_F_NTUPLE;
4294 #endif
4295 }
4296
4297 netdev->features |= NETIF_F_HIGHDMA;
4298 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4299
4300 netdev->priv_flags |= IFF_UNICAST_FLT;
4301
4302 mlx5e_set_netdev_dev_addr(netdev);
4303
4304 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4305 if (MLX5_ESWITCH_MANAGER(mdev))
4306 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4307 #endif
4308
4309 mlx5e_ipsec_build_netdev(priv);
4310 }
4311
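/* The queue counter is best effort: on allocation failure only a
 * warning is printed and priv->q_counter stays 0, which the teardown
 * path below treats as "nothing to free".
 */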
4312 static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
4313 {
4314 struct mlx5_core_dev *mdev = priv->mdev;
4315 int err;
4316
4317 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4318 if (err) {
4319 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4320 priv->q_counter = 0;
4321 }
4322 }
4323
4324 static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
4325 {
4326 if (!priv->q_counter)
4327 return;
4328
4329 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4330 }
4331
4332 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4333 struct net_device *netdev,
4334 const struct mlx5e_profile *profile,
4335 void *ppriv)
4336 {
4337 struct mlx5e_priv *priv = netdev_priv(netdev);
4338 int err;
4339
4340 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
4341 err = mlx5e_ipsec_init(priv);
4342 if (err)
4343 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4344 mlx5e_build_nic_netdev(netdev);
4345 mlx5e_vxlan_init(priv);
4346 }
4347
4348 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4349 {
4350 mlx5e_ipsec_cleanup(priv);
4351 mlx5e_vxlan_cleanup(priv);
4352
4353 if (priv->channels.params.xdp_prog)
4354 bpf_prog_put(priv->channels.params.xdp_prog);
4355 }
4356
4357 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4358 {
4359 struct mlx5_core_dev *mdev = priv->mdev;
4360 int err;
4361
4362 err = mlx5e_create_indirect_rqt(priv);
4363 if (err)
4364 return err;
4365
4366 err = mlx5e_create_direct_rqts(priv);
4367 if (err)
4368 goto err_destroy_indirect_rqts;
4369
4370 err = mlx5e_create_indirect_tirs(priv);
4371 if (err)
4372 goto err_destroy_direct_rqts;
4373
4374 err = mlx5e_create_direct_tirs(priv);
4375 if (err)
4376 goto err_destroy_indirect_tirs;
4377
4378 err = mlx5e_create_flow_steering(priv);
4379 if (err) {
4380 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4381 goto err_destroy_direct_tirs;
4382 }
4383
4384 err = mlx5e_tc_init(priv);
4385 if (err)
4386 goto err_destroy_flow_steering;
4387
4388 return 0;
4389
4390 err_destroy_flow_steering:
4391 mlx5e_destroy_flow_steering(priv);
4392 err_destroy_direct_tirs:
4393 mlx5e_destroy_direct_tirs(priv);
4394 err_destroy_indirect_tirs:
4395 mlx5e_destroy_indirect_tirs(priv);
4396 err_destroy_direct_rqts:
4397 mlx5e_destroy_direct_rqts(priv);
4398 err_destroy_indirect_rqts:
4399 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4400 return err;
4401 }
4402
4403 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4404 {
4405 mlx5e_tc_cleanup(priv);
4406 mlx5e_destroy_flow_steering(priv);
4407 mlx5e_destroy_direct_tirs(priv);
4408 mlx5e_destroy_indirect_tirs(priv);
4409 mlx5e_destroy_direct_rqts(priv);
4410 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4411 }
4412
4413 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4414 {
4415 int err;
4416
4417 err = mlx5e_create_tises(priv);
4418 if (err) {
4419 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4420 return err;
4421 }
4422
4423 #ifdef CONFIG_MLX5_CORE_EN_DCB
4424 mlx5e_dcbnl_initialize(priv);
4425 #endif
4426 return 0;
4427 }
4428
4429 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4430 {
4431 struct net_device *netdev = priv->netdev;
4432 struct mlx5_core_dev *mdev = priv->mdev;
4433 u16 max_mtu;
4434
4435 mlx5e_init_l2_addr(priv);
4436
4437 /* Mark the link as currently not needed by the driver */
4438 if (!netif_running(netdev))
4439 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4440
4441 /* MTU range: 68 - hw-specific max */
4442 netdev->min_mtu = ETH_MIN_MTU;
4443 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
4444 netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
4445 mlx5e_set_dev_port_mtu(priv);
4446
4447 mlx5_lag_add(mdev, netdev);
4448
4449 mlx5e_enable_async_events(priv);
4450
4451 if (MLX5_ESWITCH_MANAGER(priv->mdev))
4452 mlx5e_register_vport_reps(priv);
4453
4454 if (netdev->reg_state != NETREG_REGISTERED)
4455 return;
4456 #ifdef CONFIG_MLX5_CORE_EN_DCB
4457 mlx5e_dcbnl_init_app(priv);
4458 #endif
4459
4460 queue_work(priv->wq, &priv->set_rx_mode_work);
4461
4462 rtnl_lock();
4463 if (netif_running(netdev))
4464 mlx5e_open(netdev);
4465 netif_device_attach(netdev);
4466 rtnl_unlock();
4467 }
4468
4469 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4470 {
4471 struct mlx5_core_dev *mdev = priv->mdev;
4472
4473 #ifdef CONFIG_MLX5_CORE_EN_DCB
4474 if (priv->netdev->reg_state == NETREG_REGISTERED)
4475 mlx5e_dcbnl_delete_app(priv);
4476 #endif
4477
4478 rtnl_lock();
4479 if (netif_running(priv->netdev))
4480 mlx5e_close(priv->netdev);
4481 netif_device_detach(priv->netdev);
4482 rtnl_unlock();
4483
4484 queue_work(priv->wq, &priv->set_rx_mode_work);
4485
4486 if (MLX5_ESWITCH_MANAGER(priv->mdev))
4487 mlx5e_unregister_vport_reps(priv);
4488
4489 mlx5e_disable_async_events(priv);
4490 mlx5_lag_remove(mdev);
4491 }
4492
4493 static const struct mlx5e_profile mlx5e_nic_profile = {
4494 .init = mlx5e_nic_init,
4495 .cleanup = mlx5e_nic_cleanup,
4496 .init_rx = mlx5e_init_nic_rx,
4497 .cleanup_rx = mlx5e_cleanup_nic_rx,
4498 .init_tx = mlx5e_init_nic_tx,
4499 .cleanup_tx = mlx5e_cleanup_nic_tx,
4500 .enable = mlx5e_nic_enable,
4501 .disable = mlx5e_nic_disable,
4502 .update_stats = mlx5e_update_ndo_stats,
4503 .max_nch = mlx5e_get_max_num_channels,
4504 .update_carrier = mlx5e_update_carrier,
4505 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
4506 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
4507 .max_tc = MLX5E_MAX_NUM_TC,
4508 };
4509
4510 /* mlx5e generic netdev management API (move to en_common.c) */
4511
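/* Typical lifecycle, roughly as implemented by mlx5e_add()/mlx5e_remove()
 * below (mdev resources are handled separately in mlx5e_attach()/
 * mlx5e_detach()):
 *
 *	netdev = mlx5e_create_netdev(mdev, profile, ppriv);
 *	mlx5e_attach_netdev(netdev_priv(netdev));
 *	register_netdev(netdev);
 *	...
 *	unregister_netdev(netdev);
 *	mlx5e_detach_netdev(netdev_priv(netdev));
 *	mlx5e_destroy_netdev(netdev_priv(netdev));
 */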
4512 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4513 const struct mlx5e_profile *profile,
4514 void *ppriv)
4515 {
4516 int nch = profile->max_nch(mdev);
4517 struct net_device *netdev;
4518 struct mlx5e_priv *priv;
4519
4520 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
4521 nch * profile->max_tc,
4522 nch);
4523 if (!netdev) {
4524 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4525 return NULL;
4526 }
4527
4528 #ifdef CONFIG_RFS_ACCEL
4529 netdev->rx_cpu_rmap = mdev->rmap;
4530 #endif
4531
4532 profile->init(mdev, netdev, profile, ppriv);
4533
4534 netif_carrier_off(netdev);
4535
4536 priv = netdev_priv(netdev);
4537
4538 priv->wq = create_singlethread_workqueue("mlx5e");
4539 if (!priv->wq)
4540 goto err_cleanup_nic;
4541
4542 return netdev;
4543
4544 err_cleanup_nic:
4545 if (profile->cleanup)
4546 profile->cleanup(priv);
4547 free_netdev(netdev);
4548
4549 return NULL;
4550 }
4551
4552 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
4553 {
4554 struct mlx5_core_dev *mdev = priv->mdev;
4555 const struct mlx5e_profile *profile;
4556 int max_nch;
4557 int err;
4558
4559 profile = priv->profile;
4560 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
4561
4562 /* max number of channels may have changed */
4563 max_nch = mlx5e_get_max_num_channels(priv->mdev);
4564 if (priv->channels.params.num_channels > max_nch) {
4565 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
4566 priv->channels.params.num_channels = max_nch;
4567 mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
4568 MLX5E_INDIR_RQT_SIZE, max_nch);
4569 }
4570
4571 err = profile->init_tx(priv);
4572 if (err)
4573 goto out;
4574
4575 err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
4576 if (err) {
4577 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
4578 goto err_cleanup_tx;
4579 }
4580
4581 err = profile->init_rx(priv);
4582 if (err)
4583 goto err_close_drop_rq;
4584
4585 mlx5e_create_q_counter(priv);
4586
4587 if (profile->enable)
4588 profile->enable(priv);
4589
4590 return 0;
4591
4592 err_close_drop_rq:
4593 mlx5e_close_drop_rq(&priv->drop_rq);
4594
4595 err_cleanup_tx:
4596 profile->cleanup_tx(priv);
4597
4598 out:
4599 return err;
4600 }
4601
4602 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
4603 {
4604 const struct mlx5e_profile *profile = priv->profile;
4605
4606 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
4607
4608 if (profile->disable)
4609 profile->disable(priv);
4610 flush_workqueue(priv->wq);
4611
4612 mlx5e_destroy_q_counter(priv);
4613 profile->cleanup_rx(priv);
4614 mlx5e_close_drop_rq(&priv->drop_rq);
4615 profile->cleanup_tx(priv);
4616 cancel_delayed_work_sync(&priv->update_stats_work);
4617 }
4618
4619 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4620 {
4621 const struct mlx5e_profile *profile = priv->profile;
4622 struct net_device *netdev = priv->netdev;
4623
4624 destroy_workqueue(priv->wq);
4625 if (profile->cleanup)
4626 profile->cleanup(priv);
4627 free_netdev(netdev);
4628 }
4629
4630 /* The scope of mlx5e_attach and mlx5e_detach should be limited to
4631 * creating/destroying hardware contexts and connecting them to the
4632 * current netdev.
4633 */
4633 static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4634 {
4635 struct mlx5e_priv *priv = vpriv;
4636 struct net_device *netdev = priv->netdev;
4637 int err;
4638
4639 if (netif_device_present(netdev))
4640 return 0;
4641
4642 err = mlx5e_create_mdev_resources(mdev);
4643 if (err)
4644 return err;
4645
4646 err = mlx5e_attach_netdev(priv);
4647 if (err) {
4648 mlx5e_destroy_mdev_resources(mdev);
4649 return err;
4650 }
4651
4652 return 0;
4653 }
4654
4655 static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4656 {
4657 struct mlx5e_priv *priv = vpriv;
4658 struct net_device *netdev = priv->netdev;
4659
4660 if (!netif_device_present(netdev))
4661 return;
4662
4663 mlx5e_detach_netdev(priv);
4664 mlx5e_destroy_mdev_resources(mdev);
4665 }
4666
4667 static void *mlx5e_add(struct mlx5_core_dev *mdev)
4668 {
4669 struct net_device *netdev;
4670 void *rpriv = NULL;
4671 void *priv;
4672 int err;
4673
4674 err = mlx5e_check_required_hca_cap(mdev);
4675 if (err)
4676 return NULL;
4677
4678 #ifdef CONFIG_MLX5_ESWITCH
4679 if (MLX5_ESWITCH_MANAGER(mdev)) {
4680 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
4681 if (!rpriv) {
4682 mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
4683 return NULL;
4684 }
4685 }
4686 #endif
4687
4688 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
4689 if (!netdev) {
4690 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
4691 goto err_free_rpriv;
4692 }
4693
4694 priv = netdev_priv(netdev);
4695
4696 err = mlx5e_attach(mdev, priv);
4697 if (err) {
4698 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4699 goto err_destroy_netdev;
4700 }
4701
4702 err = register_netdev(netdev);
4703 if (err) {
4704 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4705 goto err_detach;
4706 }
4707
4708 #ifdef CONFIG_MLX5_CORE_EN_DCB
4709 mlx5e_dcbnl_init_app(priv);
4710 #endif
4711 return priv;
4712
4713 err_detach:
4714 mlx5e_detach(mdev, priv);
4715 err_destroy_netdev:
4716 mlx5e_destroy_netdev(priv);
4717 err_free_rpriv:
4718 kfree(rpriv);
4719 return NULL;
4720 }
4721
4722 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4723 {
4724 struct mlx5e_priv *priv = vpriv;
4725 void *ppriv = priv->ppriv;
4726
4727 #ifdef CONFIG_MLX5_CORE_EN_DCB
4728 mlx5e_dcbnl_delete_app(priv);
4729 #endif
4730 unregister_netdev(priv->netdev);
4731 mlx5e_detach(mdev, vpriv);
4732 mlx5e_destroy_netdev(priv);
4733 kfree(ppriv);
4734 }
4735
4736 static void *mlx5e_get_netdev(void *vpriv)
4737 {
4738 struct mlx5e_priv *priv = vpriv;
4739
4740 return priv->netdev;
4741 }
4742
4743 static struct mlx5_interface mlx5e_interface = {
4744 .add = mlx5e_add,
4745 .remove = mlx5e_remove,
4746 .attach = mlx5e_attach,
4747 .detach = mlx5e_detach,
4748 .event = mlx5e_async_event,
4749 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4750 .get_dev = mlx5e_get_netdev,
4751 };
4752
4753 void mlx5e_init(void)
4754 {
4755 mlx5e_ipsec_build_inverse_table();
4756 mlx5e_build_ptys2ethtool_map();
4757 mlx5_register_interface(&mlx5e_interface);
4758 }
4759
4760 void mlx5e_cleanup(void)
4761 {
4762 mlx5_unregister_interface(&mlx5e_interface);
4763 }