/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
static_assert(MLX5_RSS_HASH_KEY_LEN ==
	      (unsigned int)sizeof(rss_hash_default_key),
	      "wrong RSS default key size.");
/**
 * Check whether Multi-Packet RQ can be enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
inline int
mlx5_check_mprq_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.mprq.enabled &&
	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
		return 1;
	return -ENOTSUP;
}
/**
 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
inline int
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
{
	return rxq->strd_num_n > 0;
}
/**
 * Check whether Multi-Packet RQ is enabled for the device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 if disabled, otherwise enabled.
 */
inline int
mlx5_mprq_enabled(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;
	uint16_t n = 0;
	uint16_t n_ibv = 0;

	if (mlx5_check_mprq_support(dev) < 0)
		return 0;
	/* All the configured queues should be enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		if (mlx5_rxq_mprq_enabled(rxq))
			++n;
	}
	/* Multi-Packet RQ can't be partially configured. */
	MLX5_ASSERT(n == 0 || n == n_ibv);
	return n == n_ibv;
}
/**
 * Allocate RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;
	int err;

	/* Iterate on segments. */
	for (i = 0; i <= wqe_n; ++i) {
		struct mlx5_mprq_buf *buf;

		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		if (i < wqe_n)
			(*rxq->mprq_bufs)[i] = buf;
		else
			rxq->mprq_repl = buf;
	}
	DRV_LOG(DEBUG,
		"port %u Rx queue %u allocated and configured %u segments",
		rxq->port_id, rxq->idx, wqe_n);
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Iterate on segments. */
	for (i = 0; (i != wqe_n); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			rte_mempool_put(rxq->mprq_mp,
					(*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
		rxq->port_id, rxq->idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
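/*
 * Editor's note: the allocation loop above intentionally runs wqe_n + 1
 * times; indices 0..wqe_n-1 fill the ring while the extra buffer is parked
 * in rxq->mprq_repl as a ready replacement buffer for the data path.
 */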
/**
 * Allocate RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
	unsigned int i;
	int err;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
		if (buf == NULL) {
			DRV_LOG(ERR, "port %u empty mbuf pool",
				PORT_ID(rxq_ctrl->priv));
			rte_errno = ENOMEM;
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
		MLX5_ASSERT(!buf->next);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		NB_SEGS(buf) = 1;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		struct rte_pktmbuf_pool_private *priv =
			(struct rte_pktmbuf_pool_private *)
				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
		int j;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
		/*
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(rte_xmm_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
	}
	DRV_LOG(DEBUG,
		"port %u Rx queue %u allocated and configured %u segments"
		" (max %u packets)",
		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
		elts_n / (1 << rxq_ctrl->rxq.sges_n));
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
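/*
 * Editor's illustration: with sges_n = 2 (4 segments per packet), only
 * elts[0], elts[4], elts[8], ... keep RTE_PKTMBUF_HEADROOM; the data offset
 * of all other segments is reset to 0, so the middle and tail mbufs of a
 * scattered packet expose their full data room to the hardware.
 */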
/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
	       rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
}
/**
 * Free RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	uint16_t i;

	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
		rxq->port_id, rxq->idx);
	if (rxq->mprq_bufs == NULL)
		return;
	MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	if (rxq->mprq_repl != NULL) {
		mlx5_mprq_buf_free(rxq->mprq_repl);
		rxq->mprq_repl = NULL;
	}
}
/**
 * Free RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t i;

	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
		PORT_ID(rxq_ctrl->priv), rxq->idx);
	if (rxq->elts == NULL)
		return;
	/*
	 * Some mbufs in the ring still belong to the application;
	 * they cannot be freed.
	 */
	if (mlx5_rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
	}
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}
/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		rxq_free_elts_mprq(rxq_ctrl);
	else
		rxq_free_elts_sprq(rxq_ctrl);
}
/**
 * Returns the per-queue supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
			     DEV_RX_OFFLOAD_TIMESTAMP |
			     DEV_RX_OFFLOAD_JUMBO_FRAME |
			     DEV_RX_OFFLOAD_RSS_HASH);

	if (config->hw_fcs_strip)
		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	if (config->hw_csum)
		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
			     DEV_RX_OFFLOAD_UDP_CKSUM |
			     DEV_RX_OFFLOAD_TCP_CKSUM);
	if (config->hw_vlan_strip)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (MLX5_LRO_SUPPORTED(dev))
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	return offloads;
}
/**
 * Returns the per-port supported offloads.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_port_offloads(void)
{
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

	return offloads;
}
/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   1 if the queue can be released.
 *   0 if the queue cannot be released: there are references to it.
 *   Negative errno and rte_errno is set if queue doesn't exist.
 */
int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx]) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}
/**
 * Rx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Rx queue %u"
			" to the next power of two (%d)",
			dev->data->port_id, idx, desc);
	}
	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
		dev->data->port_id, idx, desc);
	if (idx >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->rxqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_rxq_releasable(dev, idx)) {
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	mlx5_rxq_release(dev, idx);
	return 0;
}
/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int res;

	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
	if (res)
		return res;
	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
	if (!rxq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
	return 0;
}
/**
 * DPDK callback to configure a Rx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   Hairpin configuration parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int res;

	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1 ||
	    hairpin_conf->peers[0].port != dev->data->port_id ||
	    hairpin_conf->peers[0].queue >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
			" invalid hairpin configuration", dev->data->port_id,
			idx);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!rxq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
	return 0;
}
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_priv *priv;

	if (rxq == NULL)
		return;
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
		rte_panic("port %u Rx queue %u is still used by a flow and"
			  " cannot be removed\n",
			  PORT_ID(priv), rxq->idx);
	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
}
/**
 * Get an Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs/DevX object if it exists.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (idx >= priv->rxqs_n)
		return NULL;
	if (!rxq_data)
		return NULL;
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->obj)
		rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
	return rxq_ctrl->obj;
}
/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.wqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
}
/**
 * Release Rx hairpin related resources.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

	MLX5_ASSERT(rxq_obj);
	rq_attr.state = MLX5_RQC_STATE_RST;
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}
/**
 * Release an Rx verbs/DevX queue object.
 *
 * @param rxq_obj
 *   Verbs/DevX Rx queue object.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
		switch (rxq_obj->type) {
		case MLX5_RXQ_OBJ_TYPE_IBV:
			MLX5_ASSERT(rxq_obj->wq);
			MLX5_ASSERT(rxq_obj->cq);
			rxq_free_elts(rxq_obj->rxq_ctrl);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
			break;
		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
			MLX5_ASSERT(rxq_obj->cq);
			MLX5_ASSERT(rxq_obj->rq);
			rxq_free_elts(rxq_obj->rxq_ctrl);
			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
			break;
		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
			MLX5_ASSERT(rxq_obj->rq);
			rxq_obj_hairpin_release(rxq_obj);
			break;
		}
		if (rxq_obj->channel)
			claim_zero(mlx5_glue->destroy_comp_channel
				   (rxq_obj->channel));
		LIST_REMOVE(rxq_obj, next);
		rte_free(rxq_obj);
		return 0;
	}
	return 1;
}
/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	if (!dev->data->dev_conf.intr_conf.rxq)
		return 0;
	mlx5_rx_intr_vec_disable(dev);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate memory for interrupt"
			" vector, Rx interrupts will not be supported",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		/* This rxq obj must not be released in this function. */
		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
		int fd;
		int flags;
		int rc;

		/* Skip queues that cannot request interrupts. */
		if (!rxq_obj || !rxq_obj->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			DRV_LOG(ERR,
				"port %u too many Rx queues for interrupt"
				" vector size (%d), Rx interrupts cannot be"
				" enabled",
				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
			mlx5_rx_intr_vec_disable(dev);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		fd = rxq_obj->channel->fd;
		flags = fcntl(fd, F_GETFL);
		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		if (rc < 0) {
			DRV_LOG(ERR,
				"port %u failed to make Rx interrupt file"
				" descriptor %d non-blocking for queue index"
				" %d",
				dev->data->port_id, fd, i);
			mlx5_rx_intr_vec_disable(dev);
			rte_errno = errno;
			return -rte_errno;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = fd;
		count++;
	}
	if (!count)
		mlx5_rx_intr_vec_disable(dev);
	else
		intr_handle->nb_efd = count;
	return 0;
}
/**
 * Clean up Rx interrupts handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!dev->data->dev_conf.intr_conf.rxq)
		return;
	if (!intr_handle->intr_vec)
		goto free;
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_rxq_data *rxq_data;

		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
		    RTE_MAX_RXTX_INTR_VEC_ID)
			continue;
		/*
		 * Need to access directly the queue to release the reference
		 * kept in mlx5_rx_intr_vec_enable().
		 */
		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		if (rxq_ctrl->obj)
			mlx5_rxq_obj_release(rxq_ctrl->obj);
	}
free:
	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->intr_vec)
		free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}
/**
 * MLX5 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
void
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
	int sq_n = 0;
	uint32_t doorbell_hi;
	uint64_t doorbell;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
			 cq_db_reg, rxq->uar_lock_cq);
}
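/*
 * Editor's note on the doorbell layout composed above: the upper 32 bits
 * (doorbell_hi) carry the arm sequence number and the CQ consumer index,
 * the lower 32 bits carry the CQ number:
 *   doorbell = ((uint64_t)doorbell_hi << 32) | rxq->cqn;
 * The 32-bit arm word is also written to the CQ doorbell record
 * (cq_db[MLX5_CQ_ARM_DB]) before the UAR register is rung.
 */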
/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->irq) {
		struct mlx5_rxq_obj *rxq_obj;

		rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
		if (!rxq_obj) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
		mlx5_rxq_obj_release(rxq_obj);
	}
	return 0;
}
/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (!rxq_ctrl->irq)
		return 0;
	rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
	if (!rxq_obj) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
	if (ret || ev_cq != rxq_obj->cq) {
		rte_errno = EINVAL;
		goto exit;
	}
	rxq_data->cq_arm_sn++;
	mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
	mlx5_rxq_obj_release(rxq_obj);
	return 0;
exit:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq_obj)
		mlx5_rxq_obj_release(rxq_obj);
	DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
		dev->data->port_id, rx_queue_id);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Create a CQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param priv
 *   Pointer to device private data.
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
		struct mlx5_rxq_data *rxq_data,
		unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
{
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mlx5.cqe_comp_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"port %u Rx CQE compression is disabled for HW"
			" timestamp",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"port %u Rx CQE compression is disabled for LRO",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
							      &cq_attr.ibv,
							      &cq_attr.mlx5));
}
/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param priv
 *   Pointer to device private data.
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param wqe_n
 *   Number of WQEs in WQ.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
		struct mlx5_rxq_data *rxq_data, uint16_t idx,
		unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
{
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->cq,
		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
			&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure number of WRs*SGEs match expectations since a queue
		 * cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}
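/*
 * Editor's illustration (not driver code): with wqe_n = 512 and
 * rxq_data->sges_n = 2, the attributes above request
 * max_wr = 512 >> 2 = 128 work requests of max_sge = 1 << 2 = 4
 * scatter/gather entries each, i.e. exactly 512 buffers overall, matching
 * the descriptor count checked after creation.
 */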
/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}
/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
	wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
	wq_attr->wq_umem_valid = 1;
}
/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param cqn
 *   CQ number to use with this RQ.
 *
 * @return
 *   The DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr;
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	memset(&rq_attr, 0, sizeof(rq_attr));
	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* round up power of two.*/
	wq_size = wqe_n * wqe_size;
	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
				rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem) {
		rte_free(buf);
		return NULL;
	}
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		rxq_release_rq_resources(rxq_ctrl);
	return rq;
}
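/*
 * Editor's illustration (hypothetical values): for a non-MPRQ queue with
 * elts_n = 9 and sges_n = 0, wqe_n = 1 << (9 - 0) = 512. Assuming
 * sizeof(struct mlx5_wqe_data_seg) == 16, log_wqe_size = log2above(16) + 0
 * = 4, so wq_size = 512 * 16 = 8192 bytes are allocated and registered as
 * the WQ umem above.
 */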
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = NULL;
	int ret = 0;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
				 rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR,
			"port %u Rx queue %u cannot allocate verbs resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "total data size %u power of 2 is "
				"too large for hairpin",
				priv->config.log_hp_size);
			rte_free(tmpl);
			rte_errno = ERANGE;
			return NULL;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"port %u Rx hairpin queue %u can't create rq object",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return tmpl;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		mlx5_devx_cmd_destroy(tmpl->rq);
	rte_free(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}
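/*
 * Editor's illustration (assumed values, not taken from the code above):
 * if log_hairpin_data_sz ends up as 15 and MLX5_HAIRPIN_QUEUE_STRIDE is 6,
 * log_hairpin_num_packets = 15 - 6 = 9, i.e. the hairpin queue advertises
 * 2^9 = 512 packet slots over 2^15 bytes of data buffer.
 */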
/**
 * Create the Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param type
 *   Type of Rx queue object to create.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_obj *
mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
		 enum mlx5_rxq_obj_type type)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct mlx5_rxq_obj *tmpl = NULL;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
				 rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR,
			"port %u Rx queue %u cannot allocate verbs resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	tmpl->type = type;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
		if (!tmpl->channel) {
			DRV_LOG(ERR, "port %u: comp channel creation failure",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
	}
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
	else
		cqe_n = wqe_n - 1;
	tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
	if (!tmpl->cq) {
		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
	/* Allocate door-bell for types created with DevX. */
	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
		struct mlx5_devx_dbr_page *dbr_page;
		int64_t dbr_offset;

		dbr_offset = mlx5_get_dbr(dev, &dbr_page);
		if (dbr_offset < 0)
			goto error;
		rxq_ctrl->dbr_offset = dbr_offset;
		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
		rxq_ctrl->dbr_umem_id_valid = 1;
		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
					       (uintptr_t)rxq_ctrl->dbr_offset);
	}
	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
					   tmpl);
		if (!tmpl->wq) {
			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
				dev->data->port_id, idx);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Change queue state to ready. */
		mod = (struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		};
		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
		if (ret) {
			DRV_LOG(ERR,
				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
				" failed", dev->data->port_id, idx);
			rte_errno = ret;
			goto error;
		}
		obj.rwq.in = tmpl->wq;
		obj.rwq.out = &rwq;
		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
		if (ret) {
			rte_errno = ret;
			goto error;
		}
		rxq_data->wqes = rwq.buf;
		rxq_data->rq_db = rwq.dbrec;
	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
		struct mlx5_devx_modify_rq_attr rq_attr;

		memset(&rq_attr, 0, sizeof(rq_attr));
		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
		if (!tmpl->rq) {
			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
				dev->data->port_id, idx);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Change queue state to ready. */
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
		if (ret)
			goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return tmpl;
error:
	if (tmpl) {
		ret = rte_errno; /* Save rte_errno before cleanup. */
		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
		if (tmpl->cq)
			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
		if (tmpl->channel)
			claim_zero(mlx5_glue->destroy_comp_channel
				   (tmpl->channel));
		rte_free(tmpl);
		rte_errno = ret; /* Restore rte_errno. */
	}
	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
		rxq_release_rq_resources(rxq_ctrl);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return NULL;
}
/**
 * Verify the Rx queue objects list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_rxq_obj *rxq_obj;

	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
		++ret;
	}
	return ret;
}
/**
 * Callback function to initialize mbufs for Multi-Packet RQ.
 */
static inline void
mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
		   void *_m, unsigned int i __rte_unused)
{
	struct mlx5_mprq_buf *buf = _m;
	struct rte_mbuf_ext_shared_info *shinfo;
	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
	unsigned int j;

	memset(_m, 0, sizeof(*buf));
	buf->mp = mp;
	rte_atomic16_set(&buf->refcnt, 1);
	for (j = 0; j != strd_n; ++j) {
		shinfo = &buf->shinfos[j];
		shinfo->free_cb = mlx5_mprq_buf_free_cb;
		shinfo->fcb_opaque = buf;
	}
}
/**
 * Free mempool of Multi-Packet RQ.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	unsigned int i;

	if (mp == NULL)
		return 0;
	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
		dev->data->port_id, mp->name);
	/*
	 * If a buffer in the pool has been externally attached to a mbuf and
	 * is still in use by the application, destroying the Rx queue can
	 * spoil the packet. This is unlikely, but can occur if the
	 * application dynamically creates and destroys queues while holding
	 * Rx packets.
	 *
	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
	 * RQ isn't provided by application but managed by PMD.
	 */
	if (!rte_mempool_full(mp)) {
		DRV_LOG(ERR,
			"port %u mempool for Multi-Packet RQ is still in use",
			dev->data->port_id);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	rte_mempool_free(mp);
	/* Unset mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (rxq == NULL)
			continue;
		rxq->mprq_mp = NULL;
	}
	priv->mprq_mp = NULL;
	return 0;
}
/**
 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
 * mempool. If already allocated, reuse it if there are enough elements.
 * Otherwise, resize it.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int desc = 0;
	unsigned int buf_len;
	unsigned int obj_num;
	unsigned int obj_size;
	unsigned int strd_num_n = 0;
	unsigned int strd_sz_n = 0;
	unsigned int i;
	unsigned int n_ibv = 0;

	if (!mlx5_mprq_enabled(dev))
		return 0;
	/* Count the total number of descriptors configured. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		n_ibv++;
		desc += 1 << rxq->elts_n;
		/* Get the max number of strides. */
		if (strd_num_n < rxq->strd_num_n)
			strd_num_n = rxq->strd_num_n;
		/* Get the max size of a stride. */
		if (strd_sz_n < rxq->strd_sz_n)
			strd_sz_n = rxq->strd_sz_n;
	}
	MLX5_ASSERT(strd_num_n && strd_sz_n);
	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
	/*
	 * Received packets can be either memcpy'd or externally referenced.
	 * In case the packet is attached to an mbuf as an external buffer, as
	 * it isn't possible to predict how the buffers will be queued by the
	 * application, there's no option to exactly pre-allocate the needed
	 * buffers in advance but to speculatively prepare enough buffers.
	 *
	 * In the data path, if this Mempool is depleted, PMD will try to memcpy
	 * received packets to buffers provided by application (rxq->mp) until
	 * this Mempool gets available again.
	 */
	desc *= 4;
	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
	/*
	 * rte_mempool_create_empty() has sanity check to refuse large cache
	 * size compared to the number of elements.
	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
	 * constant number 2 instead.
	 */
	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
	/* Check if a mempool is already allocated and if it can be reused. */
	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
			dev->data->port_id, mp->name);
		/* Reuse. */
		goto exit;
	} else if (mp != NULL) {
		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
			dev->data->port_id, mp->name);
		/*
		 * If failed to free, which means it may be still in use, no way
		 * but to keep using the existing one. On buffer underrun,
		 * packets will be memcpy'd instead of external buffer
		 * attachment.
		 */
		if (mlx5_mprq_free_mp(dev)) {
			if (mp->elt_size >= obj_size)
				goto exit;
			else
				return -rte_errno;
		}
	}
	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
				0, NULL, NULL, mlx5_mprq_buf_init,
				(void *)(uintptr_t)(1 << strd_num_n),
				dev->device->numa_node, 0);
	if (mp == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate a mempool for"
			" Multi-Packet RQ, count=%u, size=%u",
			dev->data->port_id, obj_num, obj_size);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mprq_mp = mp;
exit:
	/* Set mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
			(rxq, struct mlx5_rxq_ctrl, rxq);

		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		rxq->mprq_mp = mp;
	}
	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
		dev->data->port_id);
	return 0;
}
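/*
 * Editor's illustration (hypothetical queue configuration): with
 * strd_num_n = 6 (64 strides per WQE) and strd_sz_n = 11 (2 KiB strides),
 * buf_len = 64 * 2048 = 128 KiB, and each mempool object then holds one
 * struct mlx5_mprq_buf header, the 128 KiB payload, 64 shared-info
 * descriptors and RTE_PKTMBUF_HEADROOM bytes.
 */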
#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
					sizeof(struct rte_vlan_hdr) * 2 + \
					sizeof(struct rte_ipv6_hdr)))
#define MAX_TCP_OPTION_SIZE 40u
#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
				 sizeof(struct rte_tcp_hdr) + \
				 MAX_TCP_OPTION_SIZE))
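/*
 * Worked example (editor's note): with the standard DPDK header sizes
 * (Ethernet 14 B, VLAN 4 B, IPv6 40 B, TCP 20 B), the macros above give
 * MLX5_MAX_TCP_HDR_OFFSET = 14 + 2 * 4 + 40 = 62 bytes and
 * MLX5_MAX_LRO_HEADER_FIX = 62 + 20 + 40 = 122 bytes.
 */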
/**
 * Adjust the maximum LRO message size.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 * @param max_lro_size
 *   The maximum size for LRO packet.
 */
static void
mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
			     uint32_t max_lro_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
	    MLX5_MAX_TCP_HDR_OFFSET)
		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
	if (priv->max_lro_msg_size)
		priv->max_lro_msg_size =
			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
	else
		priv->max_lro_msg_size = max_lro_size;
	DRV_LOG(DEBUG,
		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
		dev->data->port_id, idx,
		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}
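/*
 * Editor's illustration (assumed values): with lro_max_msg_sz_mode ==
 * MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 and max_lro_size = 65280, the L2/L3
 * header budget (MLX5_MAX_TCP_HDR_OFFSET = 62 bytes) is first subtracted,
 * giving 65218. Assuming MLX5_LRO_SEG_CHUNK_SIZE == 256, the stored
 * priv->max_lro_msg_size would be 65218 / 256 = 254 chunks.
 */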
/**
 * Create a DPDK Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_rxconf *conf,
	     struct rte_mempool *mp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *tmpl;
	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
	unsigned int mprq_stride_nums;
	unsigned int mprq_stride_size;
	unsigned int mprq_stride_cap;
	struct mlx5_dev_config *config = &priv->config;
	/*
	 * Always allocate extra slots, even if eventually
	 * the vector Rx will not be used.
	 */
	uint16_t desc_n =
		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
	uint64_t offloads = conf->offloads |
			   dev->data->dev_conf.rxmode.offloads;
	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
	unsigned int max_rx_pkt_len = lro_on_queue ?
			dev->data->dev_conf.rxmode.max_lro_pkt_size :
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
							RTE_PKTMBUF_HEADROOM;
	unsigned int max_lro_size = 0;
	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;

	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
						    DEV_RX_OFFLOAD_SCATTER)) {
		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
			" configured and not enough mbuf space(%u) to contain "
			"the maximum Rx packet length(%u) with head-room(%u)",
			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
			RTE_PKTMBUF_HEADROOM);
		rte_errno = ENOSPC;
		return NULL;
	}
	tmpl = rte_calloc_socket("RXQ", 1,
				 sizeof(*tmpl) +
				 desc_n * sizeof(struct rte_mbuf *),
				 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	tmpl->socket = socket;
	if (dev->data->dev_conf.intr_conf.rxq)
		tmpl->irq = 1;
	mprq_stride_nums = config->mprq.stride_num_n ?
		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
	mprq_stride_size = non_scatter_min_mbuf_size <=
		(1U << config->mprq.max_stride_size_n) ?
		log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
	mprq_stride_cap = (config->mprq.stride_num_n ?
		(1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
			(config->mprq.stride_size_n ?
		(1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
	/*
	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
	 * following conditions are met:
	 *  - MPRQ is enabled.
	 *  - The number of descs is more than the number of strides.
	 *  - max_rx_pkt_len plus overhead is less than the max size
	 *    of a stride or mprq_stride_size is specified by a user.
	 *    Need to make sure that there are enough strides to encap
	 *    the maximum packet size in case mprq_stride_size is set.
	 *  Otherwise, enable Rx scatter if necessary.
	 */
	if (mprq_en && desc > (1U << mprq_stride_nums) &&
	    (non_scatter_min_mbuf_size <=
	     (1U << config->mprq.max_stride_size_n) ||
	     (config->mprq.stride_size_n &&
	      non_scatter_min_mbuf_size <= mprq_stride_cap))) {
		/* TODO: Rx scatter isn't supported yet. */
		tmpl->rxq.sges_n = 0;
		/* Trim the number of descs needed. */
		desc >>= mprq_stride_nums;
		tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
			config->mprq.stride_num_n : mprq_stride_nums;
		tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
			config->mprq.stride_size_n : mprq_stride_size;
		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
		tmpl->rxq.strd_scatter_en =
				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
				config->mprq.max_memcpy_len);
		max_lro_size = RTE_MIN(max_rx_pkt_len,
				       (1u << tmpl->rxq.strd_num_n) *
				       (1u << tmpl->rxq.strd_sz_n));
		DRV_LOG(DEBUG,
			"port %u Rx queue %u: Multi-Packet RQ is enabled"
			" strd_num_n = %u, strd_sz_n = %u",
			dev->data->port_id, idx,
			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
	} else if (max_rx_pkt_len <= first_mb_free_size) {
		tmpl->rxq.sges_n = 0;
		max_lro_size = max_rx_pkt_len;
	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
		unsigned int size = non_scatter_min_mbuf_size;
		unsigned int sges_n;

		if (lro_on_queue && first_mb_free_size <
		    MLX5_MAX_LRO_HEADER_FIX) {
			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
				" to include the max header size(%u) for LRO",
				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
			rte_errno = ENOTSUP;
			goto error;
		}
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
			DRV_LOG(ERR,
				"port %u too many SGEs (%u) needed to handle"
				" requested maximum packet size %u, the maximum"
				" supported are %u", dev->data->port_id,
				1 << sges_n, max_rx_pkt_len,
				1u << MLX5_MAX_LOG_RQ_SEGS);
			rte_errno = ENOTSUP;
			goto error;
		}
		tmpl->rxq.sges_n = sges_n;
		max_lro_size = max_rx_pkt_len;
	}
	if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
		DRV_LOG(WARNING,
			"port %u MPRQ is requested but cannot be enabled\n"
			" (requested: pkt_sz = %u, desc_num = %u,"
			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
			"  supported: min_rxqs_num = %u,"
			" min_stride_sz = %u, max_stride_sz = %u).",
			dev->data->port_id, non_scatter_min_mbuf_size,
			desc, priv->rxqs_n,
			config->mprq.stride_size_n ?
				(1U << config->mprq.stride_size_n) :
				(1U << mprq_stride_size),
			config->mprq.stride_num_n ?
				(1U << config->mprq.stride_num_n) :
				(1U << mprq_stride_nums),
			config->mprq.min_rxqs_num,
			(1U << config->mprq.min_stride_size_n),
			(1U << config->mprq.max_stride_size_n));
	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
		dev->data->port_id, 1 << tmpl->rxq.sges_n);
	if (desc % (1 << tmpl->rxq.sges_n)) {
		DRV_LOG(ERR,
			"port %u number of Rx queue descriptors (%u) is not a"
			" multiple of SGEs per packet (%u)",
			dev->data->port_id,
			desc,
			1 << tmpl->rxq.sges_n);
		rte_errno = EINVAL;
		goto error;
	}
	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
	/* Toggle RX checksum offload if hardware supports it. */
	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
	/* Configure VLAN stripping. */
	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
	/* By default, FCS (CRC) is stripped by hardware. */
	tmpl->rxq.crc_present = 0;
	tmpl->rxq.lro = lro_on_queue;
	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		if (config->hw_fcs_strip) {
			/*
			 * RQs used for LRO-enabled TIRs should not be
			 * configured to scatter the FCS.
			 */
			if (lro_on_queue)
				DRV_LOG(WARNING,
					"port %u CRC stripping has been "
					"disabled but will still be performed "
					"by hardware, because LRO is enabled",
					dev->data->port_id);
			else
				tmpl->rxq.crc_present = 1;
		} else {
			DRV_LOG(WARNING,
				"port %u CRC stripping has been disabled but will"
				" still be performed by hardware, make sure MLNX_OFED"
				" and firmware are up to date",
				dev->data->port_id);
		}
	}
	DRV_LOG(DEBUG,
		"port %u CRC stripping is %s, %u bytes will be subtracted from"
		" incoming frames to hide it",
		dev->data->port_id,
		tmpl->rxq.crc_present ? "disabled" : "enabled",
		tmpl->rxq.crc_present << 2);
	/* Save port ID. */
	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->priv = priv;
	tmpl->rxq.mp = mp;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.rq_repl_thresh =
		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
	tmpl->rxq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
#ifndef RTE_ARCH_64
	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
#endif
	tmpl->rxq.idx = idx;
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
error:
	rte_free(tmpl);
	return NULL;
}
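/*
 * Editor's illustration (assumed values): for a scattering queue with
 * max_rx_pkt_len = 9000, RTE_PKTMBUF_HEADROOM = 128 and mb_len = 2176,
 * size = 9128, so sges_n = log2above(9128 / 2176 + 1) = log2above(5) = 3,
 * i.e. 8 segments per packet; desc must then be a multiple of 8.
 */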
/**
 * Create a DPDK Rx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *tmpl;

	tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->rxq.rss_hash = 0;
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->priv = priv;
	tmpl->rxq.mp = NULL;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.elts = NULL;
	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->rxq.idx = idx;
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
}
/**
 * Get a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   A pointer to the queue if it exists, NULL otherwise.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

	if ((*priv->rxqs)[idx]) {
		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);
		mlx5_rxq_obj_get(dev, idx);
		rte_atomic32_inc(&rxq_ctrl->refcnt);
	}
	return rxq_ctrl;
}
/**
 * Release a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (!(*priv->rxqs)[idx])
		return 0;
	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
	MLX5_ASSERT(rxq_ctrl->priv);
	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
		rxq_ctrl->obj = NULL;
	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
		if (rxq_ctrl->dbr_umem_id_valid)
			claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
						    rxq_ctrl->dbr_offset));
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
		LIST_REMOVE(rxq_ctrl, next);
		rte_free(rxq_ctrl);
		(*priv->rxqs)[idx] = NULL;
		return 0;
	}
	return 1;
}
/**
 * Verify the Rx Queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	int ret = 0;

	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
			dev->data->port_id, rxq_ctrl->rxq.idx);
		++ret;
	}
	return ret;
}
/**
 * Get a Rx queue type.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   The Rx queue type.
 */
enum mlx5_rxq_type
mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

	if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);
		return rxq_ctrl->type;
	}
	return MLX5_RXQ_TYPE_UNDEFINED;
}

/**
 * Create an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 * @param type
 *   Indirection table type (Verbs or DevX).
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n, enum mlx5_ind_tbl_type type)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;
	unsigned int i = 0, j = 0, k = 0;

	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
			     queues_n * sizeof(uint16_t), 0);
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	ind_tbl->type = type;
	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
		const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
					  log2above(queues_n) :
					  log2above(priv->config.ind_table_max_size);
		struct ibv_wq *wq[1 << wq_n];

		for (i = 0; i != queues_n; ++i) {
			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
								 queues[i]);
			if (!rxq)
				goto error;
			wq[i] = rxq->obj->wq;
			ind_tbl->queues[i] = queues[i];
		}
		ind_tbl->queues_n = queues_n;
		/* Finalise indirection table. */
		k = i; /* Retain value of i for use in error case. */
		for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
			wq[k] = wq[j];
		ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
					(priv->sh->ctx,
					 &(struct ibv_rwq_ind_table_init_attr){
						.log_ind_tbl_size = wq_n,
						.ind_tbl = wq,
						.comp_mask = 0,
					 });
		if (!ind_tbl->ind_table) {
			rte_errno = errno;
			goto error;
		}
	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
		struct mlx5_devx_rqt_attr *rqt_attr = NULL;
		const unsigned int rqt_n =
			1 << (rte_is_power_of_2(queues_n) ?
			      log2above(queues_n) :
			      log2above(priv->config.ind_table_max_size));

		rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
				      rqt_n * sizeof(uint32_t), 0);
		if (!rqt_attr) {
			DRV_LOG(ERR, "port %u cannot allocate RQT resources",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
		rqt_attr->rqt_actual_size = rqt_n;
		for (i = 0; i != queues_n; ++i) {
			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
								 queues[i]);
			if (!rxq) {
				rte_free(rqt_attr);
				goto error;
			}
			rqt_attr->rq_list[i] = rxq->obj->rq->id;
			ind_tbl->queues[i] = queues[i];
		}
		k = i; /* Retain value of i for use in error case. */
		for (j = 0; k != rqt_n; ++k, ++j)
			rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
							rqt_attr);
		rte_free(rqt_attr);
		if (!ind_tbl->rqt) {
			DRV_LOG(ERR, "port %u cannot create DevX RQT",
				dev->data->port_id);
			rte_errno = errno;
			goto error;
		}
		ind_tbl->queues_n = queues_n;
	}
	rte_atomic32_inc(&ind_tbl->refcnt);
	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
	return ind_tbl;
error:
	for (j = 0; j < i; j++)
		mlx5_rxq_release(dev, ind_tbl->queues[j]);
	rte_free(ind_tbl);
	DEBUG("port %u cannot create indirection table", dev->data->port_id);
	return NULL;
}
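
/*
 * Worked example (illustrative): the indirection table is always sized to
 * a power of two. With queues = {0, 1, 2} (not a power of two) the size
 * rounds up to ind_table_max_size, and the wrap-around loops above fill
 * the tail by repetition: 0, 1, 2, 0, 1, 2, ... so the Toeplitz hash may
 * index any slot while traffic still lands only on the three real queues.
 */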

/**
 * Get an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   An indirection table if found.
 */
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		if ((ind_tbl->queues_n == queues_n) &&
		    (memcmp(ind_tbl->queues, queues,
			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
		     == 0))
			break;
	}
	if (ind_tbl) {
		unsigned int i;

		rte_atomic32_inc(&ind_tbl->refcnt);
		for (i = 0; i != ind_tbl->queues_n; ++i)
			mlx5_rxq_get(dev, ind_tbl->queues[i]);
	}
	return ind_tbl;
}

/**
 * Release an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param ind_tbl
 *   Indirection table to release.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	unsigned int i;

	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
		if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
			claim_zero(mlx5_glue->destroy_rwq_ind_table
							(ind_tbl->ind_table));
		else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
			claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
	}
	for (i = 0; i != ind_tbl->queues_n; ++i)
		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
		LIST_REMOVE(ind_tbl, next);
		rte_free(ind_tbl);
		return 0;
	}
	return 1;
}

/**
 * Verify the indirection table list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;
	int ret = 0;

	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
		DRV_LOG(DEBUG,
			"port %u indirection table obj %p still referenced",
			dev->data->port_id, (void *)ind_tbl);
		++ret;
	}
	return ret;
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set.
 */
uint32_t
mlx5_hrxq_new(struct rte_eth_dev *dev,
	      const uint8_t *rss_key, uint32_t rss_key_len,
	      uint64_t hash_fields,
	      const uint16_t *queues, uint32_t queues_n,
	      int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = NULL;
	uint32_t hrxq_idx = 0;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl;
	int err;
	struct mlx5_devx_obj *tir = NULL;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);

	queues_n = hash_fields ? queues_n : 1;
	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
	if (!ind_tbl) {
		enum mlx5_ind_tbl_type type;

		type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
				MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
	}
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		return 0;
	}
	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		struct mlx5dv_qp_init_attr qp_init_attr;

		memset(&qp_init_attr, 0, sizeof(qp_init_attr));
		if (tunnel) {
			qp_init_attr.comp_mask =
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
			qp_init_attr.create_flags =
				MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
		}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		if (dev->data->dev_conf.lpbk_mode) {
			/*
			 * Allow packet sent from NIC loop back
			 * w/o source MAC check.
			 */
			qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
			qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
		}
#endif
		qp = mlx5_glue->dv_create_qp
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			 },
			 &qp_init_attr);
#else
		qp = mlx5_glue->create_qp_ex
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			 });
#endif
		if (!qp) {
			rte_errno = errno;
			goto error;
		}
	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
		struct mlx5_devx_tir_attr tir_attr;
		uint32_t i;
		uint32_t lro = 1;

		/* Enable TIR LRO only if all the queues were configured for. */
		for (i = 0; i < queues_n; ++i) {
			if (!(*priv->rxqs)[queues[i]]->lro) {
				lro = 0;
				break;
			}
		}
		memset(&tir_attr, 0, sizeof(tir_attr));
		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
		tir_attr.tunneled_offload_en = !!tunnel;
		/* If needed, translate hash_fields bitmap to PRM format. */
		if (hash_fields) {
			struct mlx5_rx_hash_field_select *rx_hash_field_select;

#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			rx_hash_field_select =
				hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr.rx_hash_field_selector_inner :
				&tir_attr.rx_hash_field_selector_outer;
#else
			rx_hash_field_select =
				&tir_attr.rx_hash_field_selector_outer;
#endif
			/* 1 bit: 0: IPv4, 1: IPv6. */
			rx_hash_field_select->l3_prot_type =
				!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
			/* 1 bit: 0: TCP, 1: UDP. */
			rx_hash_field_select->l4_prot_type =
				!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
			/* Bitmask which sets which fields to use in RX Hash. */
			rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
			((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
			((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
		}
		if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
			tir_attr.transport_domain = priv->sh->td->id;
		else
			tir_attr.transport_domain = priv->sh->tdn;
		memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
		       MLX5_RSS_HASH_KEY_LEN);
		tir_attr.indirect_table = ind_tbl->rqt->id;
		if (dev->data->dev_conf.lpbk_mode)
			tir_attr.self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
		if (lro) {
			tir_attr.lro_timeout_period_usecs =
					priv->config.lro.timeout;
			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
			tir_attr.lro_enable_mask =
					MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
		}
		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
		if (!tir) {
			DRV_LOG(ERR, "port %u cannot create DevX TIR",
				dev->data->port_id);
			rte_errno = errno;
			goto error;
		}
	}
	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
	if (!hrxq)
		goto error;
	hrxq->ind_table = ind_tbl;
	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
		hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		hrxq->action =
			mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
		if (!hrxq->action) {
			rte_errno = errno;
			goto error;
		}
#endif
	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
		hrxq->tir = tir;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							(hrxq->tir->obj);
		if (!hrxq->action) {
			rte_errno = errno;
			goto error;
		}
#endif
	}
	hrxq->rss_key_len = rss_key_len;
	hrxq->hash_fields = hash_fields;
	memcpy(hrxq->rss_key, rss_key, rss_key_len);
	rte_atomic32_inc(&hrxq->refcnt);
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
		     hrxq, next);
	return hrxq_idx;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_ind_table_obj_release(dev, ind_tbl);
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	else if (tir)
		claim_zero(mlx5_devx_cmd_destroy(tir));
	rte_errno = err; /* Restore rte_errno. */
	return 0;
}
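
/*
 * Usage sketch (hypothetical caller, mirroring the flow engine): reuse an
 * existing hash Rx queue when possible and create one only on a lookup
 * miss; the last argument of mlx5_hrxq_new() is the tunnel flag:
 *
 *	uint32_t idx = mlx5_hrxq_get(dev, rss_hash_default_key,
 *				     MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *				     queues, queues_n);
 *
 *	if (!idx)
 *		idx = mlx5_hrxq_new(dev, rss_hash_default_key,
 *				    MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *				    queues, queues_n, 0);
 */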

/**
 * Get an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   A hash Rx queue index on success, 0 otherwise.
 */
uint32_t
mlx5_hrxq_get(struct rte_eth_dev *dev,
	      const uint8_t *rss_key, uint32_t rss_key_len,
	      uint64_t hash_fields,
	      const uint16_t *queues, uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	uint32_t idx;

	queues_n = hash_fields ? queues_n : 1;
	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
		      hrxq, next) {
		struct mlx5_ind_table_obj *ind_tbl;

		if (hrxq->rss_key_len != rss_key_len)
			continue;
		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
			continue;
		if (hrxq->hash_fields != hash_fields)
			continue;
		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
		if (!ind_tbl)
			continue;
		if (ind_tbl != hrxq->ind_table) {
			mlx5_ind_table_obj_release(dev, ind_tbl);
			continue;
		}
		rte_atomic32_inc(&hrxq->refcnt);
		return idx;
	}
	return 0;
}
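
/*
 * Note (editorial): a successful lookup takes references on the hrxq and,
 * through mlx5_ind_table_obj_get(), on the indirection table and its Rx
 * queues, so every hit must eventually be balanced by one
 * mlx5_hrxq_release() call with the returned index:
 *
 *	uint32_t idx = mlx5_hrxq_get(dev, key, len, fields, queues, n);
 *
 *	if (idx) {
 *		... use the hash Rx queue ...
 *		mlx5_hrxq_release(dev, idx);
 *	}
 */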

/**
 * Release the hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq_idx
 *   Index to Hash Rx queue to release.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;

	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
	if (!hrxq)
		return 0;
	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		mlx5_glue->destroy_flow_action(hrxq->action);
#endif
		if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
			claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
			     hrxq_idx, hrxq, next);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
		return 0;
	}
	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
	return 1;
}

/**
 * Verify the hash Rx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	uint32_t idx;
	int ret = 0;

	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
		      hrxq, next) {
		DRV_LOG(DEBUG,
			"port %u hash Rx queue %p still referenced",
			dev->data->port_id, (void *)hrxq);
		++ret;
	}
	return ret;
}

/**
 * Create a drop Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_cq *cq;
	struct ibv_wq *wq = NULL;
	struct mlx5_rxq_obj *rxq;

	if (priv->drop_queue.rxq)
		return priv->drop_queue.rxq;
	cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
	if (!cq) {
		DEBUG("port %u cannot allocate CQ for drop queue",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	wq = mlx5_glue->create_wq(ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = priv->sh->pd,
			.cq = cq,
		 });
	if (!wq) {
		DEBUG("port %u cannot allocate WQ for drop queue",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
	if (!rxq) {
		DEBUG("port %u cannot allocate drop Rx queue memory",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq->cq = cq;
	rxq->wq = wq;
	priv->drop_queue.rxq = rxq;
	return rxq;
error:
	if (wq)
		claim_zero(mlx5_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx5_glue->destroy_cq(cq));
	return NULL;
}

/**
 * Release a drop Rx queue Verbs/DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
	if (rxq->cq)
		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
	rte_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Create a drop indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl;
	struct mlx5_rxq_obj *rxq;
	struct mlx5_ind_table_obj tmpl;

	rxq = mlx5_rxq_obj_drop_new(dev);
	if (!rxq)
		return NULL;
	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
		(priv->sh->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &rxq->wq,
			.comp_mask = 0,
		 });
	if (!tmpl.ind_table) {
		DEBUG("port %u cannot allocate indirection table for drop"
		      " queue",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
	if (!ind_tbl) {
		rte_errno = ENOMEM;
		goto error;
	}
	ind_tbl->ind_table = tmpl.ind_table;
	return ind_tbl;
error:
	mlx5_rxq_obj_drop_release(dev);
	return NULL;
}

/**
 * Release a drop indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;

	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
	mlx5_rxq_obj_drop_release(dev);
	rte_free(ind_tbl);
	priv->drop_queue.hrxq->ind_table = NULL;
}

/**
 * Create a drop Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
 */
struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	struct ibv_qp *qp = NULL;
	struct mlx5_hrxq *hrxq = NULL;

	if (priv->drop_queue.hrxq) {
		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
		return priv->drop_queue.hrxq;
	}
	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
	if (!hrxq) {
		DRV_LOG(WARNING,
			"port %u cannot allocate memory for drop queue",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	priv->drop_queue.hrxq = hrxq;
	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
	if (!ind_tbl)
		goto error;
	hrxq->ind_table = ind_tbl;
	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->sh->pd,
		 });
	if (!qp) {
		DEBUG("port %u cannot allocate QP for drop queue",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	rte_atomic32_set(&hrxq->refcnt, 1);
	return hrxq;
error:
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (hrxq && hrxq->action)
		mlx5_glue->destroy_flow_action(hrxq->action);
#endif
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	if (ind_tbl)
		mlx5_ind_table_obj_drop_release(dev);
	if (hrxq) {
		priv->drop_queue.hrxq = NULL;
		rte_free(hrxq);
	}
	return NULL;
}
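
/*
 * Usage sketch (hypothetical caller in the flow engine): drop rules point
 * at the shared drop hash Rx queue instead of a real fanout set:
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno;
 *	... attach drop->action (or drop->qp) to the flow ...
 *	mlx5_hrxq_drop_release(dev);
 */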

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		mlx5_glue->destroy_flow_action(hrxq->action);
#endif
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
		mlx5_ind_table_obj_drop_release(dev);
		rte_free(hrxq);
		priv->drop_queue.hrxq = NULL;
	}
}