// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

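/* Illustrative arithmetic for mlx5e_rx_get_min_frag_sz() (example values, not
 * taken from this file): with sw_mtu 1500, MLX5E_SW2HW_MTU() adds the Ethernet
 * header overhead, and the linear headroom adds NET_IP_ALIGN plus either
 * XDP_PACKET_HEADROOM or MLX5_RX_HEADROOM, so the minimal fragment must hold
 * headroom + hw_mtu bytes.
 */
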
static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows to treat XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by using
	 * the fact there are no multiple packets per "page" (which is a frame).
	 * The latter is important, because frames may come in a random order,
	 * and we will have trouble assembling a real page of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

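/* For example (hypothetical sizes): with a 4096-byte PAGE_SIZE and a regular
 * 1500-byte MTU, the SKB-capable fragment is well under a page, but once an
 * XDP program is attached the max_t() above rounds it up to a full page, so
 * every packet owns its page.
 */
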
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

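/* Example of the log arithmetic above (MLX5_MPWRQ_LOG_WQE_SZ is assumed to be
 * 18 here, i.e. a 256 KB multi-packet WQE): a 4 KB linear fragment gives
 * order_base_2() == 12, so one MPWQE holds 2^(18 - 12) = 64 packets.
 */
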
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

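/* The first check above encodes the core invariant: stride size times the
 * number of strides must exactly tile the WQE, i.e.
 * 2^log_stride_sz * 2^log_num_strides == 2^MLX5_MPWRQ_LOG_WQE_SZ.
 * The remaining checks keep both logs within the device-supported ranges.
 */
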
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	u8 log_stride_sz;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

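/* Example of the clamping above (illustrative values): if the user asked for
 * 2^13 MTU-sized frames and each MPWQE holds 2^6 packets, the RQ needs
 * 2^(13 - 6) = 128 WQEs; requests smaller than the minimum MPW RQ size fall
 * back to MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW instead of underflowing.
 */
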
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
	       mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(params, xsk) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}

struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
{
	struct mlx5e_lro_param lro_param;

	lro_param = (struct mlx5e_lro_param) {
		.enabled = params->lro_en,
		.timeout = params->packet_merge_timeout,
	};

	return lro_param;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_tls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum-sized WQE, and in
		 * addition all the normal stop room can be taken if a new
		 * packet breaks the active MPWQE session and allocates its
		 * WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);

	return stop_room;
}

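/* Stop room in a nutshell: the SQ is stopped while at least this many WQEBBs
 * are unused, so the largest possible WQE always fits. For example, with
 * MLX5_SEND_WQE_MAX_WQEBBS of 16 (a common value, assumed here) and TX MPWQE
 * enabled, at least 2 * 16 WQEBBs are reserved, plus whatever TLS requires.
 */
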
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

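/* Both values are in Mb/s. Example (illustrative numbers): a 100000 Mb/s port
 * behind a PCIe link with only 32000 Mb/s available is flagged as slow, since
 * 100000 > 2 * 32000; the PCIe bus, not the wire, is then the bottleneck.
 */
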
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}

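/* Worked example for the non-linear path above (illustrative, assuming a
 * 4096-byte PAGE_SIZE and MLX5E_MAX_RX_FRAGS of 4): for byte_count == 9000,
 * frag_size_max stays at DEFAULT_FRAG_SIZE, producing fragments of
 * 2048 + 2048 + 2048 + 2856 bytes; the last stride is rounded up to 4096.
 */
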
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		  MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && params->lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    !!MLX5_IPSEC_DEV(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
			     order_base_2(MLX5E_UMR_WQEBBS) +
			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	}
}

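/* For striding RQ, the ICOSQ must be able to carry one UMR WQE per RQ entry
 * (each MPWQE posted needs a preceding UMR), hence the RQ log size plus
 * order_base_2(MLX5E_UMR_WQEBBS) above; the cyclic RQ posts no UMRs, so the
 * minimum SQ size is enough.
 */
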
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_accel_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
	param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}