/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "accel/ipsec.h"
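
/* Creation-time parameters for the per-channel HW objects: the rqc/sqc/cqc
 * device contexts are built in software and later copied into the
 * CREATE_RQ/SQ/CQ firmware commands, together with the work-queue
 * allocation parameters.
 */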

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}
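
/* Initialize the RQ-type dependent parameters: striding (MPWQE) RQs derive
 * stride size/count from device caps and CQE compression state, while the
 * linked-list RQ reserves headroom for XDP and build_skb.
 */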

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params, u8 rq_type)
{
	params->rq_wq_type = rq_type;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			params->mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
		params->rq_headroom = params->xdp_prog ?
			XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
		params->rq_headroom += NET_IP_ALIGN;

		/* Extra room needed for build_skb */
		params->lro_wqe_sz -= params->rq_headroom +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(params->log_rq_size),
		       BIT(params->mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;

	mlx5e_init_rq_type_params(mdev, params, rq_type);
}
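
/* Link-state handling: carrier state is queried from the vNIC vport and
 * mirrored into the netdev carrier flag, either from a workqueue item or
 * from the TX-timeout recovery path below.
 */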

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}
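
/* Statistics: software counters are folded from the per-RQ and per-SQ stats
 * of every active channel into priv->stats.sw; the remaining counter groups
 * are queried from firmware (vport, PPCNT, MPCNT and Q counters).
 */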

static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats temp, *s = &temp;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse  += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
		}
	}

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
	memcpy(&priv->stats.sw, s, sizeof(*s));
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!full)
		return;

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
		out = pstats->eth_ext_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
	int err;

	if (!priv->q_counter)
		return;

	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
	if (err)
		return;

	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
	if (full) {
		mlx5e_update_pcie_counters(priv);
		mlx5e_ipsec_update_stats(priv);
	}
	mlx5e_update_pport_counters(priv, full);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_q_counter(priv);
	mlx5e_update_sw_counters(priv);
}

static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_stats(priv, false);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}
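
/* UMR (user-mode memory registration) support for striding RQs: each MPWQE
 * entry carries an MTT array mapping its pages; the UMR WQE that registers
 * it is posted through the channel's ICOSQ.
 */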

static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe,
				       u16 ix)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
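
/* Allocate the per-WQE MPWQE info array together with DMA-mapped,
 * MLX5_UMR_ALIGN-aligned MTT arrays, and pre-build the UMR WQE of every
 * RQ entry.
 */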

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}

static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}

static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
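
/* RQ lifecycle: mlx5e_alloc_rq() builds the SW state and work queue,
 * mlx5e_create_rq() creates the HW object, and mlx5e_modify_rq_state()
 * moves it RST->RDY before traffic can flow.
 */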

static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int npages;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->clock   = &mdev->clock;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = params->rq_headroom;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);

		byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->wqe.frag_info =
			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
				     GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frag_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			kfree(rq->wqe.frag_info);
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		byte_count = params->lro_en ?
				params->lro_wqe_sz :
				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev))
			byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;

		/* calc the required page order */
		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;

			wqe->data.addr = cpu_to_be64(dma_offset);
		}

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = params->rx_cq_moderation.cq_period_mode;
	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->wqe.frag_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
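
/* Poll (for up to 20 seconds) until the RQ has been refilled with the
 * minimum number of RX WQEs required before the queue is considered usable.
 */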

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;

	struct mlx5_wq_ll *wq = &rq->wq;
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= min_wqes)
			return 0;

		msleep(20);
	}

	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->rqn, wq->cur_sz, min_wqes);
	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    rq->mpwqe.umr_in_progress)
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
		 * but yet to be re-posted.
		 */
		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
			rq->dealloc_wqe(rq, wqe_ix);
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_am_enabled)
		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);

	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);

	/* We disable csum_complete when XDP is enabled since
	 * XDP programs might manipulate packets which will render
	 * skb->checksum incorrect.
	 */
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}
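
/* Activating an RQ posts a NOP on the channel's ICOSQ and rings its
 * doorbell; deactivation clears the enabled bit and synchronizes with NAPI
 * so no new RX WQEs are posted.
 */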

static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->am.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}

static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->max_inline      = params->tx_max_inline;
	sq->min_inline_mode = params->tx_min_inline_mode;
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl        *wq_ctrl;
	u32                         cqn;
	u32                         tisn;
	u8                          tis_lst_sz;
	u8                          min_inline_mode;
};

static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}

struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};

static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);
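
/* TXQ SQ open path: allocate the SW queue, create the HW SQ in RDY state
 * and restore any previously configured per-queue rate limit.
 */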

static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}

static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}

static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}

static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}

static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}

static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
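
/* Completion queue setup: the CQ buffer lives in a fragmented buffer, CQEs
 * are initialized to invalid ownership, and the HW CQ is bound to the
 * channel's EQ vector.
 */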

static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
	if (err)
		return err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}

static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
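
/* Channel helpers: each channel is pinned to the first CPU of its IRQ
 * affinity mask and owns one CQ/SQ pair per traffic class.
 */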

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
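
/* Open a single channel: CQs first (ICOSQ, TX, RX and, with XDP, the XDP SQ
 * CQ), then NAPI is enabled, then the SQs and finally the RQ; the error
 * path unwinds in exact reverse order.
 */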

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icocq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	if (err)
		return err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;

	c->irq_desc = irq_to_desc(irq);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	*cp = c;

	return 0;

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}
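
/* Builders translating mlx5e_params into the rqc/sqc/cqc device contexts
 * consumed by the create commands above.
 */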

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}

static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
				      struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = params->log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
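
/* Open all channels from a single mlx5e_channel_param built once; on error
 * every already-opened channel is closed again.
 */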

int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}

static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
		if (err)
			break;
	}

	return err;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
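
/* RQ tables (RQTs): the indirect RQT spreads RSS traffic over channels,
 * while each direct RQT points at a single RQ. At creation time all entries
 * point at the drop RQ until redirected.
 */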

static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
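
/* Redirection: rewrite RQT entries either to the RQs of a live channel set
 * (optionally bit-inverting the index for the XOR hash function) or to the
 * drop RQ when the channels are going away.
 */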
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}

		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}

static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss = true,
		{
			.rss = {
				.channels = chs,
				.hfunc    = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
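
/* The lro_max_ip_payload_size field is programmed in 256-byte units (hence
 * the '>> 8' below); ROUGH_MAX_L2_L3_HDR_SZ leaves slack for the L2/L3
 * headers so an aggregated packet still fits in one LRO WQE of
 * lro_wqe_sz bytes.
 */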
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}

void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
				    enum mlx5e_traffic_types tt,
				    void *tirc, bool inner)
{
	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
	if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, params->toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);
	return err;
}

static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}

static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
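
/* TXQ of TC tc on channel ch maps to netdev queue ch + tc * num_channels,
 * e.g. with 4 channels, TC1 of channel 2 drives netdev txq 6. The reverse
 * txq2sq[] map is what the xmit path uses to reach the SQ.
 */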
static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}

void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}

void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}
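
/* Swap in a freshly opened set of channels: deactivate and close the old
 * set, publish the new one, then let hw_modify (when given) adjust HW state
 * such as MTU or LRO before traffic is re-enabled. The carrier is forced
 * down across the swap so the stack does not transmit meanwhile.
 */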
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	bool carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}

void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}

int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5e_vxlan_allowed(priv->mdev))
		udp_tunnel_get_rx_info(netdev);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}
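
/* The drop RQ is a minimal RQ that is never posted to or polled; RQ tables
 * are pointed at it whenever no real channels are available (see
 * mlx5e_redirect_rqts_to_drop()), so ingress steering always has a valid
 * destination.
 */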
static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(mdev, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	u32 *in;
	int err;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct tc_cls_flower_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_can_offload(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_setup_tc_block(struct net_device *dev,
				struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif

int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
		   void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_BLOCK:
		return mlx5e_setup_tc_block(dev, type_data);
#endif
	case TC_SETUP_QDISC_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(features, feature, enable)	\
	do {						\
		if (enable)				\
			*features |= feature;		\
		else					\
			*features &= ~feature;		\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	if (!reset) {
		priv->channels.params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_cvlan_filter(priv);
	else
		mlx5e_disable_cvlan_filter(priv);

	return 0;
}

#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}
#endif

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
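
/* Apply one feature bit: run its handler only when the wanted value differs
 * from the current netdev state, and mirror the result into 'features' so
 * the caller can report back what was actually committed.
 */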
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t *features,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(features, feature, enable);
	return 0;
}
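
/* The handlers' return codes are OR-ed together below: the exact errno is
 * lost, but any failure makes the accumulated value non-zero, in which case
 * the features that did apply are kept in oper_features and -EINVAL is
 * returned.
 */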
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	netdev_features_t oper_features = netdev->features;
	int err;

	err = mlx5e_handle_feature(netdev, &oper_features, features,
				   NETIF_F_LRO, set_feature_lro);
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_cvlan_filter);
#ifdef CONFIG_MLX5_ESWITCH
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_RXALL, set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_RXFCS, set_feature_rx_fcs);
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, &oper_features, features,
				    NETIF_F_NTUPLE, set_feature_arfs);
#endif

	if (err) {
		netdev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mutex_lock(&priv->state_lock);
	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
		/* HW strips the outer C-tag header, this is a problem
		 * for S-tag traffic.
		 */
		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
	}
	mutex_unlock(&priv->state_lock);

	return features;
}

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int curr_mtu;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->channels.params.lro_en &&
		(priv->channels.params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	curr_mtu    = netdev->mtu;
	netdev->mtu = new_mtu;

	if (!reset) {
		mlx5e_set_dev_port_mtu(priv);
		goto out;
	}

	new_channels.params = priv->channels.params;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err) {
		netdev->mtu = curr_mtu;
		goto out;
	}

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* Reset CQE compression to Admin default */
		mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		/* Disable CQE compression */
		netdev_warn(priv->netdev, "Disabling cqe compression");
		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
		if (err) {
			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
			mutex_unlock(&priv->state_lock);
			return err;
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		mutex_unlock(&priv->state_lock);
		return -ERANGE;
	}

	memcpy(&priv->tstamp, &config, sizeof(config));
	mutex_unlock(&priv->state_lock);

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
	struct hwtstamp_config *cfg = &priv->tstamp;

	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}

static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	switch (proto) {
	case IPPROTO_GRE:
		return features;
	case IPPROTO_UDP:
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);

		/* Verify if UDP port is being offloaded by HW */
		if (mlx5e_vxlan_lookup_port(priv, port))
			return features;
	}

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
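
/* Installing or removing an XDP program changes the RQ memory scheme, so
 * those transitions close and reopen the channels; merely exchanging one
 * program for another keeps the channels up and swaps rq->xdp_prog per
 * channel under a brief NAPI quiescence instead.
 */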
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_params(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_channels *chs = &priv->channels;
	int i;

	for (i = 0; i < chs->num; i++)
		napi_schedule(&chs->c[i]->napi);
}
#endif

static const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_fix_features        = mlx5e_fix_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}

static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
{
	return !(link_speed && pci_bw &&
		 (pci_bw <= 16000) && (pci_bw < link_speed));
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->tx_cq_moderation.cq_period_mode = cq_period_mode;

	params->tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	params->tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->tx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_moderation.cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	if (params->rx_am_enabled)
		params->rx_cq_moderation =
			mlx5e_am_get_def_profile(cq_period_mode);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels)
{
	u8 rx_cq_period_mode;
	u32 link_speed = 0;
	u32 pci_bw = 0;

	params->num_channels = max_channels;

	mlx5e_get_max_linkspeed(mdev, &link_speed);
	mlx5e_get_pci_bw(mdev, &pci_bw);
	mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
		      link_speed, pci_bw);

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_set_rq_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev     = mdev;
	priv->netdev   = netdev;
	priv->profile  = profile;
	priv->ppriv    = ppriv;
	priv->msglevel = MLX5E_MSG_LEVEL;
	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	mlx5e_timestamp_init(priv);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};
#endif
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
	if (MLX5_ESWITCH_MANAGER(mdev))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

	mlx5e_ipsec_build_netdev(priv);
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);

	if (priv->channels.params.xdp_prog)
		bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_register_vport_reps(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_unregister_vport_reps(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
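
/* The mlx5e_profile vtable lets the same netdev scaffolding serve both this
 * NIC profile and the switchdev representor profile: init/cleanup build the
 * private state, init_rx/init_tx create steering and TIS resources, and
 * enable/disable attach the netdev to the hardware.
 */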
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_ndo_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
/* mlx5e generic netdev management API (move to en_common.c) */
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}
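/* Usage note (added): mlx5e_create_netdev() only allocates and
 * profile-initializes the netdev; the caller is expected to pair it with
 * mlx5e_attach_netdev()/register_netdev() to bring the device up, and with
 * mlx5e_detach_netdev()/mlx5e_destroy_netdev() on teardown, exactly as
 * mlx5e_add() and mlx5e_remove() below demonstrate.
 */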
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	const struct mlx5e_profile *profile;
	int max_nch;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* max number of channels may have changed */
	max_nch = mlx5e_get_max_num_channels(priv->mdev);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		priv->channels.params.num_channels = max_nch;
		mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
					      MLX5E_INDIR_RQT_SIZE, max_nch);
	}

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
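/* Note (added, best-effort explanation): the "drop RQ" opened above is a
 * minimal receive queue that is never fed real buffers.  It gives the
 * flow-steering rules created by init_rx a safe target to point at before
 * (and while) the real channel RQs exist; traffic steered there is simply
 * dropped by the hardware.
 */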
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}
/* The scope of mlx5e_attach() and mlx5e_detach() should be limited to
 * creating/destroying hardware contexts and connecting them to the current
 * netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}
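/* Note (added): the netif_device_present() guards make mlx5e_attach() and
 * mlx5e_detach() safe to call repeatedly; the core driver relies on this in
 * recovery-style flows where the hardware contexts are torn down and rebuilt
 * while the registered netdev itself survives.
 */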
static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *rpriv = NULL;
	void *priv;
	int err;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev)) {
		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
		if (!rpriv) {
			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
			return NULL;
		}
	}
#endif

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_free_rpriv;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_free_rpriv:
	kfree(rpriv);
	return NULL;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	void *ppriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
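/* Sketch (added commentary, simplified): registering this interface with the
 * core driver causes ->add() to be invoked for every mlx5_core_dev that is
 * already up, and for each device probed later, roughly:
 *
 *	list_for_each_entry(dev, &mlx5_dev_list, list)
 *		intf->add(dev);
 *
 * so the Ethernet driver binds to all present and future mlx5 functions
 * that support MLX5_INTERFACE_PROTOCOL_ETH.
 */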
void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}