/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
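/* Parameter bundles used when creating the hardware queue objects below.
 * The rqc/sqc/cqc arrays carry the firmware command context (sized in
 * 32-bit words via MLX5_ST_SZ_DW), while mlx5_wq_param carries host-side
 * work-queue allocation hints such as NUMA nodes.
 */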
struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
};

struct mlx5e_cq_param {
        u32                        cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param       wq;
        u16                        eq_ix;
        u8                         cq_period_mode;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
        struct mlx5e_sq_param      sq;
        struct mlx5e_sq_param      xdp_sq;
        struct mlx5e_sq_param      icosq;
        struct mlx5e_cq_param      rx_cq;
        struct mlx5e_cq_param      tx_cq;
        struct mlx5e_cq_param      icosq_cq;
};
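/* Striding (multi-packet WQE) RQ needs the striding_rq and umr_ptr_rlky
 * general caps plus UMR-over-regular-SQ support, since the driver posts
 * UMR WQEs on the internal control SQ (icosq) to map MPWQE buffers.
 */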
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN(mdev, striding_rq) &&
                MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
                MLX5_CAP_ETH(mdev, reg_umr_sq);
}
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                              struct mlx5e_params *params, u8 rq_type)
{
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                params->mpwqe_log_stride_sz =
                        MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
                        MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
                        MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

                /* Extra room needed for build_skb */
                params->lro_wqe_sz -= MLX5_RX_HEADROOM +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        }

        mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
                       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
                       BIT(params->log_rq_size),
                       BIT(params->mpwqe_log_stride_sz),
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;

        mlx5e_set_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                                            MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP) {
                netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
        } else {
                netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
        }
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
        int err;

        rtnl_lock();
        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
        mlx5e_close_locked(priv->netdev);
        err = mlx5e_open_locked(priv->netdev);
        if (err)
                netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
                           err);
unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
}
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = &c->rq.stats;

                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_tx += rq_stats->xdp_tx;
                s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
                s->rx_cache_reuse += rq_stats->cache_reuse;
                s->rx_cache_full  += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy  += rq_stats->cache_busy;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;

                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
                        s->tx_tso_packets       += sq_stats->tso_packets;
                        s->tx_tso_bytes         += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_xmit_more         += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none         += sq_stats->csum_none;
                }
        }

        /* Update calculated offload counters */
        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
}
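/* The hardware counters below are fetched with firmware commands: vport
 * counters via QUERY_VPORT_COUNTER, port (pport) and PCIe counters via
 * access-register reads of PPCNT/MPCNT, one counter group per call.
 */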
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;
        u32 *in;

        in = mlx5_vzalloc(sz);
        if (!in)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
                out = pstats->phy_statistical_counters;
                MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }

        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }

free_out:
        kvfree(in);
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

        if (!priv->q_counter)
                return;

        mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
                                      &qcnt->rx_out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;
        u32 *in;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        in = mlx5_vzalloc(sz);
        if (!in)
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);

        kvfree(in);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->profile->update_stats(priv);
                queue_delayed_work(priv->wq, dwork,
                                   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;
        struct ptp_clock_event ptp_event;
        struct mlx5_eqe *eqe = NULL;

        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;

        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
        case MLX5_DEV_EVENT_PPS:
                eqe = (struct mlx5_eqe *)param;
                ptp_event.type = PTP_CLOCK_EXTTS;
                ptp_event.index = eqe->data.pps.pin;
                ptp_event.timestamp =
                        timecounter_cyc2time(&priv->tstamp.clock,
                                             be64_to_cpu(eqe->data.pps.time_stamp));
                mlx5e_pps_event_handler(vpriv, &ptp_event);
                break;
        default:
                break;
        }
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
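/* MPWQE RX path: each multi-packet WQE is backed by an MTT (memory
 * translation table) array that the device fetches through a UMR WQE.
 * The helpers below size, build and DMA-map those per-WQE MTT arrays.
 */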
static inline int mlx5e_get_wqe_mtt_sz(void)
{
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the mtt array, we allocate
         * a little more.
         */
        return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
                     MLX5_UMR_MTT_ALIGNMENT);
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
                                       struct mlx5e_icosq *sq,
                                       struct mlx5e_umr_wqe *wqe,
                                       u16 ix)
{
        struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

        cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                      ds_cnt);
        cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->imm       = rq->mkey_be;

        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
        ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
        ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

        dseg->lkey = sq->mkey_be;
        dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
                                     struct mlx5e_channel *c)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
        int i;

        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
                                      GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;

        /* We allocate more than mtt_sz as we will align the pointer */
        rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
                                              cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
                                        MLX5_UMR_ALIGN);
                wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
                                                  PCI_DMA_TODEVICE);
                if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
                        goto err_unmap_mtts;

                mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
        }

        return 0;

err_unmap_mtts:
        while (--i >= 0) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }
        kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
        kfree(rq->mpwqe.info);

err_out:
        return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int i;

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

                dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
                                 PCI_DMA_TODEVICE);
        }

        kfree(rq->mpwqe.mtt_no_align);
        kfree(rq->mpwqe.info);
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
                                 u64 npages, u8 page_shift,
                                 struct mlx5_core_mkey *umr_mkey)
{
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        if (!MLX5E_VALID_NUM_MTTS(npages))
                return -EINVAL;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
        MLX5_SET64(mkc, mkc, len, npages << page_shift);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 MLX5_MTT_OCTW(npages));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);

        err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
        u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

        return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
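/* mlx5e_alloc_rq() below sets up the host side of an RQ: the linked-list
 * work queue, the optional XDP program reference, per-WQE bookkeeping
 * (MPWQE info or a dma_info array), and the precomputed byte_count/lkey
 * written into every receive WQE.
 */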
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_rq_param *rqp,
                          struct mlx5e_rq *rq)
{
        struct mlx5_core_dev *mdev = c->mdev;
        void *rqc = rqp->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        u32 frag_sz;
        int npages;
        int wq_sz;
        int err;
        int i;

        rqp->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);

        rq->wq_type = params->rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
        rq->tstamp  = c->tstamp;
        rq->channel = c;
        rq->ix      = c->ix;
        rq->mdev    = mdev;

        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
                err = PTR_ERR(rq->xdp_prog);
                rq->xdp_prog = NULL;
                goto err_rq_wq_destroy;
        }

        if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
                rq->rx_headroom = XDP_PACKET_HEADROOM;
        } else {
                rq->buff.map_dir = DMA_FROM_DEVICE;
                rq->rx_headroom = MLX5_RX_HEADROOM;
        }

        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

                rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
                if (!rq->handle_rx_cqe) {
                        err = -EINVAL;
                        netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
                        goto err_rq_wq_destroy;
                }

                rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);

                rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->buff.wqe_sz;

                err = mlx5e_create_rq_umr_mkey(mdev, rq);
                if (err)
                        goto err_rq_wq_destroy;
                rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

                err = mlx5e_rq_alloc_mpwqe_info(rq, c);
                if (err)
                        goto err_destroy_umr_mkey;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
                                            GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->dma_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

                rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
                if (!rq->handle_rx_cqe) {
                        kfree(rq->dma_info);
                        err = -EINVAL;
                        netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
                        goto err_rq_wq_destroy;
                }

                rq->buff.wqe_sz = params->lro_en ?
                                params->lro_wqe_sz :
                                MLX5E_SW2HW_MTU(c->netdev->mtu);
                byte_count = rq->buff.wqe_sz;

                /* calc the required page order */
                frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);

                npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
                rq->buff.page_order = order_base_2(npages);

                byte_count |= MLX5_HW_START_PADDING;
                rq->mkey_be = c->mkey_be;
        }

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

                wqe->data.byte_count = cpu_to_be32(byte_count);
                wqe->data.lkey = rq->mkey_be;
        }

        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = params->rx_cq_period_mode;
        rq->page_cache.head = 0;
        rq->page_cache.tail = 0;

        return 0;

err_destroy_umr_mkey:
        mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
        int i;

        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);

        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_mpwqe_info(rq);
                mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->dma_info);
        }

        for (i = rq->page_cache.head; i != rq->page_cache.tail;
             i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
                struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

                mlx5e_page_release(rq, dma_info, false);
        }
        mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq,
                           struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = rq->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq  = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc,  rqc, cqn,          rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,        MLX5_RQC_STATE_RST);
        MLX5_SET(wq,   wq,  log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,     rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5_core_dev *mdev = c->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
        MLX5_SET(rqc, rqc, scatter_fcs, enable);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5_core_dev *mdev = c->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;

        struct mlx5_wq_ll *wq = &rq->wq;
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= min_wqes)
                        return 0;

                msleep(20);
        }

        netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
                    rq->rqn, wq->cur_sz, min_wqes);
        return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe;
        __be16 wqe_ix_be;
        u16 wqe_ix;

        /* UMR WQE (if in progress) is always at wq->head */
        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
                mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
                wqe_ix    = be16_to_cpu(wqe_ix_be);
                wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
                rq->dealloc_wqe(rq, wqe_ix);
                mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
                               &wqe->next.next_wqe_index);
        }
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_params *params,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_alloc_rq(c, params, param, rq);
        if (err)
                return err;

        err = mlx5e_create_rq(rq, param);
        if (err)
                goto err_free_rq;

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_destroy_rq;

        if (params->rx_am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(rq);
err_free_rq:
        mlx5e_free_rq(rq);

        return err;
}
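/* Activating an RQ posts a NOP on the channel's icosq and rings its
 * doorbell; the resulting completion kicks NAPI so mlx5e_post_rx_wqes()
 * starts filling the freshly enabled RQ.
 */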
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
        struct mlx5e_icosq *sq = &rq->channel->icosq;
        u16 pi = sq->pc & sq->wq.sz_m1;
        struct mlx5e_tx_wqe *nopwqe;

        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
        sq->db.ico_wqe[pi].num_wqebbs = 1;
        nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        cancel_work_sync(&rq->am.work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
        kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
                                 GFP_KERNEL, numa);
        if (!sq->db.di) {
                mlx5e_free_xdpsq_db(sq);
                return -ENOMEM;
        }

        return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_xdpsq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
        mlx5e_free_xdpsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
        kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
        u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

        sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
                                      GFP_KERNEL, numa);
        if (!sq->db.ico_wqe)
                return -ENOMEM;

        return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_icosq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
        mlx5e_free_icosq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
        kfree(sq->db.wqe_info);
        kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
                                       GFP_KERNEL, numa);
        sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
                                       GFP_KERNEL, numa);
        if (!sq->db.dma_fifo || !sq->db.wqe_info) {
                mlx5e_free_txqsq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
                             struct mlx5e_txqsq *sq)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        sq->pdev      = c->pdev;
        sq->tstamp    = c->tstamp;
        sq->mkey_be   = c->mkey_be;
        sq->channel   = c;
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->max_inline      = params->tx_max_inline;
        sq->min_inline_mode = params->tx_min_inline_mode;

        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

        err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

        return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
}
struct mlx5e_create_sq_param {
        struct mlx5_wq_ctrl        *wq_ctrl;
        u32                         cqn;
        u32                         tisn;
        u8                          tis_lst_sz;
        u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_create_sq_param *csp,
                           u32 *sqn)
{
        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * csp->wq_ctrl->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));
        MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
        MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
        MLX5_SET(sqc,  sqc, cqn, csp->cqn);

        if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

        MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

        mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, sqn);

        kvfree(in);

        return err;
}

struct mlx5e_modify_sq_param {
        int curr_state;
        int next_state;
        bool rl_update;
        int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
                           struct mlx5e_modify_sq_param *p)
{
        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
        MLX5_SET(sqc, sqc, state, p->next_state);
        if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
                MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
        }

        err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
        mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
                               struct mlx5e_sq_param *param,
                               struct mlx5e_create_sq_param *csp,
                               u32 *sqn)
{
        struct mlx5e_modify_sq_param msp = {0};
        int err;

        err = mlx5e_create_sq(mdev, param, csp, sqn);
        if (err)
                return err;

        msp.curr_state = MLX5_SQC_STATE_RST;
        msp.next_state = MLX5_SQC_STATE_RDY;
        err = mlx5e_modify_sq(mdev, *sqn, &msp);
        if (err)
                mlx5e_destroy_sq(mdev, *sqn);

        return err;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate);

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
                            u32 tisn,
                            int txq_ix,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_txqsq *sq)
{
        struct mlx5e_create_sq_param csp = {};
        u32 tx_rate;
        int err;

        err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
        if (err)
                return err;

        csp.tisn            = tisn;
        csp.tis_lst_sz      = 1;
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_txqsq;

        tx_rate = c->priv->tx_rates[sq->txq_ix];
        if (tx_rate)
                mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

        return 0;

err_free_txqsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_txqsq(sq);

        return err;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
        sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
        napi_synchronize(&c->napi);

        netif_tx_disable_queue(sq->txq);

        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
                struct mlx5e_tx_wqe *nop;

                sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
                nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5_core_dev *mdev = c->mdev;

        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit)
                mlx5_rl_remove_rate(mdev, sq->rate_limit);
        mlx5e_free_txqsq_descs(sq);
        mlx5e_free_txqsq(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_icosq *sq)
{
        struct mlx5e_create_sq_param csp = {};
        int err;

        err = mlx5e_alloc_icosq(c, param, sq);
        if (err)
                return err;

        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = params->tx_min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_icosq;

        return 0;

err_free_icosq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_icosq(sq);

        return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->mdev, sq->sqn);
        mlx5e_free_icosq(sq);
}
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
                            struct mlx5e_xdpsq *sq)
{
        unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
        struct mlx5e_create_sq_param csp = {};
        unsigned int inline_hdr_sz = 0;
        int err;
        int i;

        err = mlx5e_alloc_xdpsq(c, params, param, sq);
        if (err)
                return err;

        csp.tis_lst_sz      = 1;
        csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
        if (err)
                goto err_free_xdpsq;

        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
                ds_cnt++;
        }

        /* Pre initialize fixed WQE fields */
        for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
                struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
                struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
                struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
                struct mlx5_wqe_data_seg *dseg;

                cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
                eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

                dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
                dseg->lkey = sq->mkey_be;
        }

        return 0;

err_free_xdpsq:
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        mlx5e_free_xdpsq(sq);

        return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
        struct mlx5e_channel *c = sq->channel;

        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        napi_synchronize(&c->napi);

        mlx5e_destroy_sq(c->mdev, sq->sqn);
        mlx5e_free_xdpsq_descs(sq);
        mlx5e_free_xdpsq(sq);
}
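/* CQ setup below: mlx5e_alloc_cq_common() creates the host completion
 * queue, points the set_ci/arm doorbell records into the db area, and
 * marks every CQE as HW-owned (op_own = 0xf1) so the poller ignores them
 * until the device writes them.
 */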
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
                                 struct mlx5e_cq_param *param,
                                 struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db    = 0;
        mcq->vector     = param->eq_ix;
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->mdev = mdev;

        return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
                          struct mlx5e_cq_param *param,
                          struct mlx5e_cq *cq)
{
        struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix            = c->ix;

        err = mlx5e_alloc_cq_common(mdev, param, cq);

        cq->napi    = &c->napi;
        cq->channel = c;

        return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
        mlx5_cqwq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = cq->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_moder moder,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq)
{
        struct mlx5_core_dev *mdev = c->mdev;
        int err;

        err = mlx5e_alloc_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_create_cq(cq, param);
        if (err)
                goto err_free_cq;

        if (MLX5_CAP_GEN(mdev, cq_moderation))
                mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
        return 0;

err_free_cq:
        mlx5e_free_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_destroy_cq(cq);
        mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, params->tx_cq_moderation,
                                    &cparam->tx_cq, &c->sq[tc].cq);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < params->num_tc; tc++) {
                int txq_ix = c->ix + tc * params->num_channels;

                err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
                                       params, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_txqsq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_modify_sq_param msp = {0};
        u16 rl_index = 0;
        int err;

        if (rate == sq->rate_limit)
                /* nothing to do */
                return 0;

        if (sq->rate_limit)
                /* remove current rl index to free space to next ones */
                mlx5_rl_remove_rate(mdev, sq->rate_limit);

        sq->rate_limit = 0;

        if (rate) {
                err = mlx5_rl_add_rate(mdev, rate, &rl_index);
                if (err) {
                        netdev_err(dev, "Failed configuring rate %u: %d\n",
                                   rate, err);
                        return err;
                }
        }

        msp.curr_state = MLX5_SQC_STATE_RDY;
        msp.next_state = MLX5_SQC_STATE_RDY;
        msp.rl_index   = rl_index;
        msp.rl_update  = true;
        err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
        if (err) {
                netdev_err(dev, "Failed configuring rate %u: %d\n",
                           rate, err);
                /* remove the rate from the table */
                if (rate)
                        mlx5_rl_remove_rate(mdev, rate);
                return err;
        }

        sq->rate_limit = rate;
        return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_txqsq *sq = priv->txq2sq[index];
        int err = 0;

        if (!mlx5_rl_is_supported(mdev)) {
                netdev_err(dev, "Rate limiting is not supported on this device\n");
                return -EINVAL;
        }

        /* rate is given in Mb/sec, HW config is in Kb/sec */
        rate = rate << 10;

        /* Check whether rate in valid range, 0 is always valid */
        if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
                netdev_err(dev, "TX rate %u, is not in range\n", rate);
                return -ERANGE;
        }

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                err = mlx5e_set_sq_maxrate(dev, sq, rate);
        if (!err)
                priv->tx_rates[index] = rate;
        mutex_unlock(&priv->state_lock);

        return err;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_params *params,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv     = priv;
        c->mdev     = priv->mdev;
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = params->num_tc;
        c->xdp      = !!params->xdp_prog;

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_tx_cqs(c, params, cparam);
        if (err)
                goto err_close_icosq_cq;

        err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
        if (err)
                goto err_close_tx_cqs;

        /* XDP SQ CQ params are same as normal TXQ sq CQ params */
        err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
                                     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
        if (err)
                goto err_close_rx_cq;

        napi_enable(&c->napi);

        err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_sqs(c, params, cparam);
        if (err)
                goto err_close_icosq;

        err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
        if (err)
                goto err_close_sqs;

        err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
        if (err)
                goto err_close_xdp_sq;

        *cp = c;

        return 0;
err_close_xdp_sq:
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
        mlx5e_close_sqs(c);

err_close_icosq:
        mlx5e_close_icosq(&c->icosq);

err_disable_napi:
        napi_disable(&c->napi);
        if (c->xdp)
                mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
        netif_napi_del(&c->napi);
        kfree(c);

        return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_rq(&c->rq);
        netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
        int tc;

        mlx5e_deactivate_rq(&c->rq);
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        if (c->xdp)
                mlx5e_close_xdpsq(&c->rq.xdpsq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
        napi_disable(&c->napi);
        if (c->xdp)
                mlx5e_close_cq(&c->rq.xdpsq.cq);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
        netif_napi_del(&c->napi);

        kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_params *params,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
                MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        }

        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
        MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
        MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
        MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
        MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
                                        struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_params *params,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_params *params,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;
        u8 log_cq_size;

        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                log_cq_size = params->log_rq_size;
        }

        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
        if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
                MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
        }

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = params->rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_params *params,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
                                     u8 log_wq_size,
                                     struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

        mlx5e_build_common_cq_param(priv, param);

        param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
                                    u8 log_wq_size,
                                    struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(priv, param);

        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_params *params,
                                    struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_params *params,
                                      struct mlx5e_channel_param *cparam)
{
        u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

        mlx5e_build_rq_param(priv, params, &cparam->rq);
        mlx5e_build_sq_param(priv, params, &cparam->sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
        mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
        mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
                        struct mlx5e_channels *chs)
{
        struct mlx5e_channel_param *cparam;
        int err = -ENOMEM;
        int i;

        chs->num = chs->params.num_channels;

        chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
        cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
        if (!chs->c || !cparam)
                goto err_free;

        mlx5e_build_channel_param(priv, &chs->params, cparam);
        for (i = 0; i < chs->num; i++) {
                err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
                if (err)
                        goto err_close_channels;
        }

        kfree(cparam);
        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(chs->c[i]);

err_free:
        kfree(chs->c);
        kfree(cparam);
        chs->num = 0;
        return err;
}

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
        int i;

        for (i = 0; i < chs->num; i++)
                mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
        int err = 0;
        int i;

        for (i = 0; i < chs->num; i++) {
                err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
                if (err)
                        break;
        }

        return err;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
        int i;

        for (i = 0; i < chs->num; i++)
                mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
        int i;

        for (i = 0; i < chs->num; i++)
                mlx5e_close_channel(chs->c[i]);

        kfree(chs->c);
        chs->num = 0;
}
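/* RQ tables (RQTs) map flows to RQs: one indirection table of
 * MLX5E_INDIR_RQT_SIZE entries for RSS, plus a single-entry direct table
 * per channel. Tables are created pointing at the drop RQ and redirected
 * to live RQs once channels are open.
 */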
static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        int inlen;
        int err;
        u32 *in;
        int i;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        for (i = 0; i < sz; i++)
                MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

        err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
        if (!err)
                rqt->enabled = true;

        kvfree(in);
        return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
        rqt->enabled = false;
        mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
        struct mlx5e_rqt *rqt = &priv->indir_rqt;
        int err;

        err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
        if (err)
                mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
        return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
        struct mlx5e_rqt *rqt;
        int err;
        int ix;

        for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
                rqt = &priv->direct_tir[ix].rqt;
                err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
                if (err)
                        goto err_destroy_rqts;
        }

        return 0;

err_destroy_rqts:
        mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
        for (ix--; ix >= 0; ix--)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

        return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}

static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
                                struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
        int i;

        for (i = 0; i < sz; i++) {
                u32 rqn;

                if (rrp.is_rss) {
                        int ix = i;

                        if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
                                ix = mlx5e_bits_invert(i, ilog2(sz));

                        ix = priv->channels.params.indirection_rqt[ix];
                        rqn = rrp.rss.channels->c[ix]->rq.rqn;
                } else {
                        rqn = rrp.rqn;
                }

                MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
                       struct mlx5e_redirect_rqt_param rrp)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        int inlen;
        u32 *in;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
        mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
        err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

        kvfree(in);
        return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
                                struct mlx5e_redirect_rqt_param rrp)
{
        if (!rrp.is_rss)
                return rrp.rqn;

        if (ix >= rrp.rss.channels->num)
                return priv->drop_rq.rqn;

        return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
                                struct mlx5e_redirect_rqt_param rrp)
{
        u32 rqtn;
        int ix;

        if (priv->indir_rqt.enabled) {
                /* RSS RQ table */
                rqtn = priv->indir_rqt.rqtn;
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
        }

        for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
                struct mlx5e_redirect_rqt_param direct_rrp = {
                        .is_rss = false,
                        {
                                .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
                        },
                };

                /* Direct RQ Tables */
                if (!priv->direct_tir[ix].rqt.enabled)
                        continue;

                rqtn = priv->direct_tir[ix].rqt.rqtn;
                mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
        }
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
                                            struct mlx5e_channels *chs)
{
        struct mlx5e_redirect_rqt_param rrp = {
                .is_rss        = true,
                {
                        .rss = {
                                .channels  = chs,
                                .hfunc     = chs->params.rss_hfunc,
                        }
                },
        };

        mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
        struct mlx5e_redirect_rqt_param drop_rrp = {
                .is_rss = false,
                {
                        .rqn = priv->drop_rq.rqn,
                },
        };

        mlx5e_redirect_rqts(priv, drop_rrp);
}
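/* TIR context builders: LRO settings and the RSS hash configuration
 * (Toeplitz or inverted-XOR8, hash fields selected per traffic type)
 * that the device applies before steering a packet into an RQT.
 */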
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
        if (!params->lro_en)
                return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

        MLX5_SET(tirc, tirc, lro_enable_mask,
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
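/* Summary of the selection above: TCP/UDP traffic types hash on the
 * 4-tuple (MLX5_HASH_IP_L4PORTS), IPsec AH/ESP types hash on the address
 * pair plus the SPI (MLX5_HASH_IP_IPSEC_SPI), and plain IPv4/IPv6 fall
 * back to the address pair alone (MLX5_HASH_IP).
 */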
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *tirc;
	int inlen;
	u32 *in;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);

	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
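/* MLX5E_SW2HW_MTU/MLX5E_HW2SW_MTU translate between the netdev view of
 * the MTU and the port MTU, which also accounts for the Ethernet framing
 * overhead; the warning above fires when firmware clamps the requested
 * value and the two views diverge.
 */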
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
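/* Illustrative mapping (worked example, not from the source): with
 * 2 channels and 2 TCs, txq_ix = ch + tc * num_channels gives
 * channel_tc2txq = { {0, 2}, {1, 3} } -- all TC0 queues first, then all
 * TC1 queues, matching the netdev_set_tc_queue() layout programmed above.
 */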
static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
{
	return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
		MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
}

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	mlx5e_update_carrier(priv);
}
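/* The switch sequence above is deliberately ordered: carrier off and txq
 * shrink first (so the stack stops queueing), then deactivate and close
 * the old channels, install the new set, run the optional hw_modify
 * callback while the channels are quiesced, and only then reactivate and
 * refresh the carrier state.
 */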
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	return mlx5e_alloc_cq_common(mdev, param, cq);
}

static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
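/* The drop RQ is a minimal receive queue the driver never posts buffers
 * to; RQ tables are redirected to it whenever there is no valid channel
 * RQ to receive into (device down, channel switch), so the HW silently
 * drops the packets instead.
 */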
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}
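/* A TIS (transport interface send) is created per traffic class. The
 * "tc << 1" presumably matches the device's encoding of the prio field
 * (an assumption based on the shift above, not spelled out in this
 * file); strict LAG TX port affinity is requested only when this
 * function owns the LACP bond.
 */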
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
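/* Two TIR flavors are built here: indirect TIRs spread traffic over the
 * RSS indirection table with a per-traffic-type hash, while direct TIRs
 * point at a single-entry RQT and use the cheap inverted-XOR8 hash,
 * since no spreading decision is needed for a single RQ.
 */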
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	if (!reset) {
		priv->channels.params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
				    set_feature_rx_fcs);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
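/* Each handler above follows the same contract: mlx5e_handle_feature()
 * only invokes it when the requested state differs from netdev->features,
 * and only commits the feature bit when the handler returns 0. Any
 * individual failure is collapsed into -EINVAL for the whole
 * ndo_set_features call.
 */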
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int curr_mtu;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->channels.params.lro_en &&
		(priv->channels.params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	curr_mtu    = netdev->mtu;
	netdev->mtu = new_mtu;

	if (!reset) {
		mlx5e_set_dev_port_mtu(priv);
		goto out;
	}

	new_channels.params = priv->channels.params;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err) {
		netdev->mtu = curr_mtu;
		goto out;
	}

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
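/* A full channel reset on MTU change is only needed for the non-striding
 * (linked-list) RQ with LRO off, where the RX buffer size is derived
 * from the MTU; in every other case updating the port MTU in place is
 * sufficient.
 */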
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u8 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
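/* For encapsulated traffic the checksum/GSO features are only kept when
 * the outer UDP destination port was previously offloaded to the HW via
 * ndo_udp_tunnel_add; everything else falls back to software
 * checksumming and segmentation.
 */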
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_params(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
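/* Two update paths above: installing or removing a program changes the
 * RQ type and therefore requires a full close/open cycle, while replacing
 * one program with another is done live by briefly disabling each RQ,
 * synchronizing NAPI, and swapping rq->xdp_prog underneath it.
 */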
static bool mlx5e_xdp_attached(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return !!priv->channels.params.xdp_prog;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = mlx5e_xdp_attached(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_channels *chs = &priv->channels;

	int i;

	for (i = 0; i < chs->num; i++)
		napi_schedule(&chs->c[i]->napi);
}
#endif
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
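/* Reading of the code above (an assumption, not documented here): only
 * half the blueflame register is usable per post, hence the halving; the
 * maximum inline payload is then what remains after the TX WQE headers,
 * plus the two bytes of inline_hdr_start that overlay the WQE itself.
 */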
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}
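/* Illustrative numbers (assumed, not from the source): a 40 Gb/s link on
 * a PCIe Gen3 x4 slot gives pci_bw = 8000 * 4 = 32000 Mb/s, which
 * satisfies both pci_bw < 40000 and pci_bw < link_speed, so CQE
 * compression defaults on to save PCIe bandwidth; a Gen3 x8 slot
 * (64000 Mb/s) leaves it off.
 */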
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	if (params->rx_am_enabled)
		params->rx_cq_moderation =
			mlx5e_am_get_def_profile(params->rx_cq_period_mode);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels)
{
	u8 cq_period_mode = 0;
	u32 link_speed = 0;
	u32 pci_bw = 0;

	params->num_channels = max_channels;
	params->num_tc       = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
	}
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	mlx5e_set_rq_params(mdev, params);

	/* HW LRO */
	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		params->lro_en = true;
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;

	/* TX inline */
	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev    = mdev;
	priv->netdev  = netdev;
	priv->profile = profile;
	priv->ppriv   = ppriv;

	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}
static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features          = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features  &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features	 |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_vxlan_cleanup(priv);

	if (priv->channels.params.xdp_prog)
		bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = FDB_UPLINK_VPORT;
		rep.netdev = netdev;
		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
	}

	mlx5e_register_vport_rep(mdev);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;

	/* Device already registered: sync netdev system state */
	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);
	mlx5e_unregister_vport_rep(mdev);
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
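/* The profile is the driver's abstraction point: the same netdev
 * create/attach/detach machinery below serves both this NIC profile and
 * the eswitch representor profile, which plugs in different rx/tx init
 * callbacks and rx_handlers.
 */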
/* mlx5e generic netdev management API (move to en_common.c) */
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}

int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	const struct mlx5e_profile *profile;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}
/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
 * hardware contexts and to connect it to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_priv *priv;
	void *ppriv = NULL;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}