ceph/src/seastar/dpdk/drivers/net/mlx5/mlx5_rxq.c
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
4 */
5
6#include <stddef.h>
7#include <assert.h>
8#include <errno.h>
9#include <string.h>
10#include <stdint.h>
11#include <fcntl.h>
12#include <sys/queue.h>
13
14/* Verbs header. */
15/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16#ifdef PEDANTIC
17#pragma GCC diagnostic ignored "-Wpedantic"
18#endif
19#include <infiniband/verbs.h>
20#include <infiniband/mlx5dv.h>
21#ifdef PEDANTIC
22#pragma GCC diagnostic error "-Wpedantic"
23#endif
24
25#include <rte_mbuf.h>
26#include <rte_malloc.h>
27#include <rte_ethdev_driver.h>
28#include <rte_common.h>
29#include <rte_interrupts.h>
30#include <rte_debug.h>
31#include <rte_io.h>
32
33#include "mlx5.h"
34#include "mlx5_rxtx.h"
35#include "mlx5_utils.h"
36#include "mlx5_autoconf.h"
37#include "mlx5_defs.h"
38#include "mlx5_glue.h"
39
40/* Default RSS hash key also used for ConnectX-3. */
41uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
52};
53
54/* Length of the default RSS hash key. */
55static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
58
59/**
60 * Check whether Multi-Packet RQ can be enabled for the device.
61 *
62 * @param dev
63 * Pointer to Ethernet device.
64 *
65 * @return
66 * 1 if supported, negative errno value if not.
67 */
68inline int
69mlx5_check_mprq_support(struct rte_eth_dev *dev)
70{
71 struct mlx5_priv *priv = dev->data->dev_private;
72
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
75 return 1;
76 return -ENOTSUP;
77}
78
79/**
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
81 *
82 * @param rxq
83 * Pointer to receive queue structure.
84 *
85 * @return
86 * 0 if disabled, otherwise enabled.
87 */
88inline int
89mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
90{
91 return rxq->strd_num_n > 0;
92}
93
94/**
95 * Check whether Multi-Packet RQ is enabled for the device.
96 *
97 * @param dev
98 * Pointer to Ethernet device.
99 *
100 * @return
101 * 0 if disabled, otherwise enabled.
102 */
103inline int
104mlx5_mprq_enabled(struct rte_eth_dev *dev)
105{
106 struct mlx5_priv *priv = dev->data->dev_private;
107 uint16_t i;
108 uint16_t n = 0;
109
110 if (mlx5_check_mprq_support(dev) < 0)
111 return 0;
112 /* All the configured queues should be enabled. */
113 for (i = 0; i < priv->rxqs_n; ++i) {
114 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
115
116 if (!rxq)
117 continue;
118 if (mlx5_rxq_mprq_enabled(rxq))
119 ++n;
120 }
121 /* Multi-Packet RQ can't be partially configured. */
122 assert(n == 0 || n == priv->rxqs_n);
123 return n == priv->rxqs_n;
124}
125
126/**
127 * Allocate RX queue elements for Multi-Packet RQ.
128 *
129 * @param rxq_ctrl
130 * Pointer to RX queue structure.
131 *
132 * @return
133 * 0 on success, a negative errno value otherwise and rte_errno is set.
134 */
135static int
136rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
137{
138 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
139 unsigned int wqe_n = 1 << rxq->elts_n;
140 unsigned int i;
141 int err;
142
143 /* Iterate on segments. */
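 /* One extra buffer (the i == wqe_n iteration) is kept aside as rxq->mprq_repl. */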
144 for (i = 0; i <= wqe_n; ++i) {
145 struct mlx5_mprq_buf *buf;
146
147 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
148 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
149 rte_errno = ENOMEM;
150 goto error;
151 }
152 if (i < wqe_n)
153 (*rxq->mprq_bufs)[i] = buf;
154 else
155 rxq->mprq_repl = buf;
156 }
157 DRV_LOG(DEBUG,
158 "port %u Rx queue %u allocated and configured %u segments",
159 rxq->port_id, rxq->idx, wqe_n);
160 return 0;
161error:
162 err = rte_errno; /* Save rte_errno before cleanup. */
163 wqe_n = i;
164 for (i = 0; (i != wqe_n); ++i) {
165 if ((*rxq->mprq_bufs)[i] != NULL)
166 rte_mempool_put(rxq->mprq_mp,
167 (*rxq->mprq_bufs)[i]);
168 (*rxq->mprq_bufs)[i] = NULL;
169 }
170 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
171 rxq->port_id, rxq->idx);
172 rte_errno = err; /* Restore rte_errno. */
173 return -rte_errno;
174}
175
176/**
177 * Allocate RX queue elements for Single-Packet RQ.
178 *
179 * @param rxq_ctrl
180 * Pointer to RX queue structure.
181 *
182 * @return
183 * 0 on success, errno value on failure.
184 */
185static int
186rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
187{
188 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
189 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
190 unsigned int i;
191 int err;
192
193 /* Iterate on segments. */
194 for (i = 0; (i != elts_n); ++i) {
195 struct rte_mbuf *buf;
196
197 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
198 if (buf == NULL) {
199 DRV_LOG(ERR, "port %u empty mbuf pool",
200 PORT_ID(rxq_ctrl->priv));
201 rte_errno = ENOMEM;
202 goto error;
203 }
204 /* Headroom is reserved by rte_pktmbuf_alloc(). */
205 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
206 /* Buffer is supposed to be empty. */
207 assert(rte_pktmbuf_data_len(buf) == 0);
208 assert(rte_pktmbuf_pkt_len(buf) == 0);
209 assert(!buf->next);
210 /* Only the first segment keeps headroom. */
211 if (i % sges_n)
212 SET_DATA_OFF(buf, 0);
213 PORT(buf) = rxq_ctrl->rxq.port_id;
214 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
215 PKT_LEN(buf) = DATA_LEN(buf);
216 NB_SEGS(buf) = 1;
217 (*rxq_ctrl->rxq.elts)[i] = buf;
218 }
219 /* If Rx vector is activated. */
220 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
221 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
222 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
223 int j;
224
225 /* Initialize default rearm_data for vPMD. */
226 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
227 rte_mbuf_refcnt_set(mbuf_init, 1);
228 mbuf_init->nb_segs = 1;
229 mbuf_init->port = rxq->port_id;
230 /*
231 * prevent compiler reordering:
232 * rearm_data covers previous fields.
233 */
234 rte_compiler_barrier();
235 rxq->mbuf_initializer =
236 *(uint64_t *)&mbuf_init->rearm_data;
237 /* Padding with a fake mbuf for vectorized Rx. */
238 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
239 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
240 }
241 DRV_LOG(DEBUG,
242 "port %u Rx queue %u allocated and configured %u segments"
243 " (max %u packets)",
244 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
245 elts_n / (1 << rxq_ctrl->rxq.sges_n));
246 return 0;
247error:
248 err = rte_errno; /* Save rte_errno before cleanup. */
249 elts_n = i;
250 for (i = 0; (i != elts_n); ++i) {
251 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
252 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
253 (*rxq_ctrl->rxq.elts)[i] = NULL;
254 }
255 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
256 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
257 rte_errno = err; /* Restore rte_errno. */
258 return -rte_errno;
259}
260
261/**
262 * Allocate RX queue elements.
263 *
264 * @param rxq_ctrl
265 * Pointer to RX queue structure.
266 *
267 * @return
268 * 0 on success, errno value on failure.
269 */
270int
271rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
272{
273 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
274 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
275}
276
277/**
278 * Free RX queue elements for Multi-Packet RQ.
279 *
280 * @param rxq_ctrl
281 * Pointer to RX queue structure.
282 */
283static void
284rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
285{
286 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
287 uint16_t i;
288
289 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
290 rxq->port_id, rxq->idx);
291 if (rxq->mprq_bufs == NULL)
292 return;
293 assert(mlx5_rxq_check_vec_support(rxq) < 0);
294 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
295 if ((*rxq->mprq_bufs)[i] != NULL)
296 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
297 (*rxq->mprq_bufs)[i] = NULL;
298 }
299 if (rxq->mprq_repl != NULL) {
300 mlx5_mprq_buf_free(rxq->mprq_repl);
301 rxq->mprq_repl = NULL;
302 }
303}
304
305/**
306 * Free RX queue elements for Single-Packet RQ.
307 *
308 * @param rxq_ctrl
309 * Pointer to RX queue structure.
310 */
311static void
312rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
313{
314 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
315 const uint16_t q_n = (1 << rxq->elts_n);
316 const uint16_t q_mask = q_n - 1;
317 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
318 uint16_t i;
319
320 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
321 PORT_ID(rxq_ctrl->priv), rxq->idx);
322 if (rxq->elts == NULL)
323 return;
324 /**
325 * Some mbufs in the ring belong to the application and cannot be
326 * freed.
327 */
328 if (mlx5_rxq_check_vec_support(rxq) > 0) {
329 for (i = 0; i < used; ++i)
330 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
331 rxq->rq_pi = rxq->rq_ci;
332 }
333 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
334 if ((*rxq->elts)[i] != NULL)
335 rte_pktmbuf_free_seg((*rxq->elts)[i]);
336 (*rxq->elts)[i] = NULL;
337 }
338}
339
340/**
341 * Free RX queue elements.
342 *
343 * @param rxq_ctrl
344 * Pointer to RX queue structure.
345 */
346static void
347rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
348{
349 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
350 rxq_free_elts_mprq(rxq_ctrl);
351 else
352 rxq_free_elts_sprq(rxq_ctrl);
353}
354
355/**
356 * Clean up a RX queue.
357 *
358 * Destroy objects, free allocated memory and reset the structure for reuse.
359 *
360 * @param rxq_ctrl
361 * Pointer to RX queue structure.
362 */
363void
364mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
365{
366 DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
367 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
368 if (rxq_ctrl->ibv)
369 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
370 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
371}
372
373/**
374 * Returns the per-queue supported offloads.
375 *
376 * @param dev
377 * Pointer to Ethernet device.
378 *
379 * @return
380 * Supported Rx offloads.
381 */
382uint64_t
383mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
384{
385 struct mlx5_priv *priv = dev->data->dev_private;
386 struct mlx5_dev_config *config = &priv->config;
387 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
388 DEV_RX_OFFLOAD_TIMESTAMP |
389 DEV_RX_OFFLOAD_JUMBO_FRAME);
390
391 if (config->hw_fcs_strip)
392 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
393
394 if (config->hw_csum)
395 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
396 DEV_RX_OFFLOAD_UDP_CKSUM |
397 DEV_RX_OFFLOAD_TCP_CKSUM);
398 if (config->hw_vlan_strip)
399 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
400 return offloads;
401}
402
403
404/**
405 * Returns the per-port supported offloads.
406 *
407 * @return
408 * Supported Rx offloads.
409 */
410uint64_t
411mlx5_get_rx_port_offloads(void)
412{
413 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
414
415 return offloads;
416}
417
418/**
419 * DPDK callback to configure a Rx queue.
420 * @param dev
421 * Pointer to Ethernet device structure.
422 * @param idx
423 * RX queue index.
424 * @param desc
425 * Number of descriptors to configure in queue.
426 * @param socket
427 * NUMA socket on which memory must be allocated.
428 * @param[in] conf
429 * Thresholds parameters.
430 * @param mp
431 * Memory pool for buffer allocations.
432 *
433 * @return
434 * 0 on success, a negative errno value otherwise and rte_errno is set.
435 */
436int
437mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
438 unsigned int socket, const struct rte_eth_rxconf *conf,
439 struct rte_mempool *mp)
440{
441 struct mlx5_priv *priv = dev->data->dev_private;
442 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
443 struct mlx5_rxq_ctrl *rxq_ctrl =
444 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
445
446 if (!rte_is_power_of_2(desc)) {
447 desc = 1 << log2above(desc);
448 DRV_LOG(WARNING,
449 "port %u increased number of descriptors in Rx queue %u"
450 " to the next power of two (%d)",
451 dev->data->port_id, idx, desc);
452 }
453 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
454 dev->data->port_id, idx, desc);
455 if (idx >= priv->rxqs_n) {
456 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
457 dev->data->port_id, idx, priv->rxqs_n);
458 rte_errno = EOVERFLOW;
459 return -rte_errno;
460 }
461 if (!mlx5_rxq_releasable(dev, idx)) {
462 DRV_LOG(ERR, "port %u unable to release queue index %u",
463 dev->data->port_id, idx);
464 rte_errno = EBUSY;
465 return -rte_errno;
466 }
467 mlx5_rxq_release(dev, idx);
468 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
469 if (!rxq_ctrl) {
470 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
471 dev->data->port_id, idx);
472 rte_errno = ENOMEM;
473 return -rte_errno;
474 }
475 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
476 dev->data->port_id, idx);
477 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
478 return 0;
479}
480
481/**
482 * DPDK callback to release a RX queue.
483 *
484 * @param dpdk_rxq
485 * Generic RX queue pointer.
486 */
487void
488mlx5_rx_queue_release(void *dpdk_rxq)
489{
490 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
491 struct mlx5_rxq_ctrl *rxq_ctrl;
492 struct mlx5_priv *priv;
493
494 if (rxq == NULL)
495 return;
496 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
497 priv = rxq_ctrl->priv;
498 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
499 rte_panic("port %u Rx queue %u is still used by a flow and"
500 " cannot be removed\n",
501 PORT_ID(priv), rxq->idx);
502 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
503}
504
505/**
506 * Allocate queue vector and fill epoll fd list for Rx interrupts.
507 *
508 * @param dev
509 * Pointer to Ethernet device.
510 *
511 * @return
512 * 0 on success, a negative errno value otherwise and rte_errno is set.
513 */
514int
515mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
516{
517 struct mlx5_priv *priv = dev->data->dev_private;
518 unsigned int i;
519 unsigned int rxqs_n = priv->rxqs_n;
520 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
521 unsigned int count = 0;
522 struct rte_intr_handle *intr_handle = dev->intr_handle;
523
524 if (!dev->data->dev_conf.intr_conf.rxq)
525 return 0;
526 mlx5_rx_intr_vec_disable(dev);
527 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
528 if (intr_handle->intr_vec == NULL) {
529 DRV_LOG(ERR,
530 "port %u failed to allocate memory for interrupt"
531 " vector, Rx interrupts will not be supported",
532 dev->data->port_id);
533 rte_errno = ENOMEM;
534 return -rte_errno;
535 }
536 intr_handle->type = RTE_INTR_HANDLE_EXT;
537 for (i = 0; i != n; ++i) {
538 /* This rxq ibv must not be released in this function. */
539 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
540 int fd;
541 int flags;
542 int rc;
543
544 /* Skip queues that cannot request interrupts. */
545 if (!rxq_ibv || !rxq_ibv->channel) {
546 /* Use invalid intr_vec[] index to disable entry. */
547 intr_handle->intr_vec[i] =
548 RTE_INTR_VEC_RXTX_OFFSET +
549 RTE_MAX_RXTX_INTR_VEC_ID;
550 continue;
551 }
552 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
553 DRV_LOG(ERR,
554 "port %u too many Rx queues for interrupt"
555 " vector size (%d), Rx interrupts cannot be"
556 " enabled",
557 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
558 mlx5_rx_intr_vec_disable(dev);
559 rte_errno = ENOMEM;
560 return -rte_errno;
561 }
562 fd = rxq_ibv->channel->fd;
563 flags = fcntl(fd, F_GETFL);
564 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
565 if (rc < 0) {
566 rte_errno = errno;
567 DRV_LOG(ERR,
568 "port %u failed to make Rx interrupt file"
569 " descriptor %d non-blocking for queue index"
570 " %d",
571 dev->data->port_id, fd, i);
572 mlx5_rx_intr_vec_disable(dev);
573 return -rte_errno;
574 }
575 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
576 intr_handle->efds[count] = fd;
577 count++;
578 }
579 if (!count)
580 mlx5_rx_intr_vec_disable(dev);
581 else
582 intr_handle->nb_efd = count;
583 return 0;
584}
585
586/**
587 * Clean up Rx interrupts handler.
588 *
589 * @param dev
590 * Pointer to Ethernet device.
591 */
592void
593mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
594{
595 struct mlx5_priv *priv = dev->data->dev_private;
596 struct rte_intr_handle *intr_handle = dev->intr_handle;
597 unsigned int i;
598 unsigned int rxqs_n = priv->rxqs_n;
599 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
600
601 if (!dev->data->dev_conf.intr_conf.rxq)
602 return;
603 if (!intr_handle->intr_vec)
604 goto free;
605 for (i = 0; i != n; ++i) {
606 struct mlx5_rxq_ctrl *rxq_ctrl;
607 struct mlx5_rxq_data *rxq_data;
608
609 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
610 RTE_MAX_RXTX_INTR_VEC_ID)
611 continue;
612 /**
613 * Need to access the queue directly to release the reference
614 * kept by mlx5_rx_intr_vec_enable().
615 */
616 rxq_data = (*priv->rxqs)[i];
617 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
618 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
619 }
620free:
621 rte_intr_free_epoll_fd(intr_handle);
622 if (intr_handle->intr_vec)
623 free(intr_handle->intr_vec);
624 intr_handle->nb_efd = 0;
625 intr_handle->intr_vec = NULL;
626}
627
628/**
629 * MLX5 CQ notification.
630 *
631 * @param rxq
632 * Pointer to receive queue structure.
633 * @param sq_n_rxq
634 * Sequence number per receive queue.
635 */
636static inline void
637mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
638{
639 int sq_n = 0;
640 uint32_t doorbell_hi;
641 uint64_t doorbell;
642 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
643
644 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
645 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
646 doorbell = (uint64_t)doorbell_hi << 32;
647 doorbell |= rxq->cqn;
648 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
649 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
650 cq_db_reg, rxq->uar_lock_cq);
651}
652
653/**
654 * DPDK callback for Rx queue interrupt enable.
655 *
656 * @param dev
657 * Pointer to Ethernet device structure.
658 * @param rx_queue_id
659 * Rx queue number.
660 *
661 * @return
662 * 0 on success, a negative errno value otherwise and rte_errno is set.
663 */
664int
665mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
666{
667 struct mlx5_priv *priv = dev->data->dev_private;
668 struct mlx5_rxq_data *rxq_data;
669 struct mlx5_rxq_ctrl *rxq_ctrl;
670
671 rxq_data = (*priv->rxqs)[rx_queue_id];
672 if (!rxq_data) {
673 rte_errno = EINVAL;
674 return -rte_errno;
675 }
676 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
677 if (rxq_ctrl->irq) {
678 struct mlx5_rxq_ibv *rxq_ibv;
679
680 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
681 if (!rxq_ibv) {
682 rte_errno = EINVAL;
683 return -rte_errno;
684 }
685 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
686 mlx5_rxq_ibv_release(rxq_ibv);
687 }
688 return 0;
689}
690
691/**
692 * DPDK callback for Rx queue interrupt disable.
693 *
694 * @param dev
695 * Pointer to Ethernet device structure.
696 * @param rx_queue_id
697 * Rx queue number.
698 *
699 * @return
700 * 0 on success, a negative errno value otherwise and rte_errno is set.
701 */
702int
703mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
704{
705 struct mlx5_priv *priv = dev->data->dev_private;
706 struct mlx5_rxq_data *rxq_data;
707 struct mlx5_rxq_ctrl *rxq_ctrl;
708 struct mlx5_rxq_ibv *rxq_ibv = NULL;
709 struct ibv_cq *ev_cq;
710 void *ev_ctx;
711 int ret;
712
713 rxq_data = (*priv->rxqs)[rx_queue_id];
714 if (!rxq_data) {
715 rte_errno = EINVAL;
716 return -rte_errno;
717 }
718 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
719 if (!rxq_ctrl->irq)
720 return 0;
721 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
722 if (!rxq_ibv) {
723 rte_errno = EINVAL;
724 return -rte_errno;
725 }
726 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
727 if (ret || ev_cq != rxq_ibv->cq) {
728 rte_errno = EINVAL;
729 goto exit;
730 }
731 rxq_data->cq_arm_sn++;
732 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
733 mlx5_rxq_ibv_release(rxq_ibv);
734 return 0;
735exit:
736 ret = rte_errno; /* Save rte_errno before cleanup. */
737 if (rxq_ibv)
738 mlx5_rxq_ibv_release(rxq_ibv);
739 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
740 dev->data->port_id, rx_queue_id);
741 rte_errno = ret; /* Restore rte_errno. */
742 return -rte_errno;
743}
744
745/**
746 * Create the Rx queue Verbs object.
747 *
748 * @param dev
749 * Pointer to Ethernet device.
750 * @param idx
751 * Queue index in DPDK Rx queue array.
752 *
753 * @return
754 * The Verbs object initialised, NULL otherwise and rte_errno is set.
755 */
756struct mlx5_rxq_ibv *
757mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
758{
759 struct mlx5_priv *priv = dev->data->dev_private;
760 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
761 struct mlx5_rxq_ctrl *rxq_ctrl =
762 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
763 struct ibv_wq_attr mod;
764 union {
765 struct {
766 struct ibv_cq_init_attr_ex ibv;
767 struct mlx5dv_cq_init_attr mlx5;
768 } cq;
769 struct {
770 struct ibv_wq_init_attr ibv;
771#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
772 struct mlx5dv_wq_init_attr mlx5;
773#endif
774 } wq;
775 struct ibv_cq_ex cq_attr;
776 } attr;
777 unsigned int cqe_n;
778 unsigned int wqe_n = 1 << rxq_data->elts_n;
779 struct mlx5_rxq_ibv *tmpl;
780 struct mlx5dv_cq cq_info;
781 struct mlx5dv_rwq rwq;
782 unsigned int i;
783 int ret = 0;
784 struct mlx5dv_obj obj;
785 struct mlx5_dev_config *config = &priv->config;
786 const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
787
788 assert(rxq_data);
789 assert(!rxq_ctrl->ibv);
790 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
791 priv->verbs_alloc_ctx.obj = rxq_ctrl;
792 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
793 rxq_ctrl->socket);
794 if (!tmpl) {
795 DRV_LOG(ERR,
796 "port %u Rx queue %u cannot allocate verbs resources",
797 dev->data->port_id, rxq_data->idx);
798 rte_errno = ENOMEM;
799 goto error;
800 }
801 tmpl->rxq_ctrl = rxq_ctrl;
802 if (rxq_ctrl->irq) {
803 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
804 if (!tmpl->channel) {
805 DRV_LOG(ERR, "port %u: comp channel creation failure",
806 dev->data->port_id);
807 rte_errno = ENOMEM;
808 goto error;
809 }
810 }
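 /* Request one CQE per stride for MPRQ, otherwise one CQE per WQE. */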
811 if (mprq_en)
812 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
813 else
814 cqe_n = wqe_n - 1;
815 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
816 .cqe = cqe_n,
817 .channel = tmpl->channel,
818 .comp_mask = 0,
819 };
820 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
821 .comp_mask = 0,
822 };
823 if (config->cqe_comp && !rxq_data->hw_timestamp) {
824 attr.cq.mlx5.comp_mask |=
825 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
826#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
827 attr.cq.mlx5.cqe_comp_res_format =
828 mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
829 MLX5DV_CQE_RES_FORMAT_HASH;
830#else
831 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
832#endif
833 /*
834 * For vectorized Rx, it must not be doubled in order to
835 * make cq_ci and rq_ci aligned.
836 */
837 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
838 attr.cq.ibv.cqe *= 2;
839 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
840 DRV_LOG(DEBUG,
841 "port %u Rx CQE compression is disabled for HW"
842 " timestamp",
843 dev->data->port_id);
844 }
845#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
846 if (config->cqe_pad) {
847 attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
848 attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
849 }
850#endif
851 tmpl->cq = mlx5_glue->cq_ex_to_cq
852 (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
853 &attr.cq.mlx5));
854 if (tmpl->cq == NULL) {
855 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
856 dev->data->port_id, idx);
857 rte_errno = ENOMEM;
858 goto error;
859 }
860 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
861 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
862 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
863 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
864 attr.wq.ibv = (struct ibv_wq_init_attr){
865 .wq_context = NULL, /* Could be useful in the future. */
866 .wq_type = IBV_WQT_RQ,
867 /* Max number of outstanding WRs. */
868 .max_wr = wqe_n >> rxq_data->sges_n,
869 /* Max number of scatter/gather elements in a WR. */
870 .max_sge = 1 << rxq_data->sges_n,
871 .pd = priv->sh->pd,
872 .cq = tmpl->cq,
873 .comp_mask =
874 IBV_WQ_FLAGS_CVLAN_STRIPPING |
875 0,
876 .create_flags = (rxq_data->vlan_strip ?
877 IBV_WQ_FLAGS_CVLAN_STRIPPING :
878 0),
879 };
880 /* By default, FCS (CRC) is stripped by hardware. */
881 if (rxq_data->crc_present) {
882 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
883 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
884 }
885 if (config->hw_padding) {
886#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
887 attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
888 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
889#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
890 attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
891 attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
892#endif
893 }
894#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
895 attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
896 .comp_mask = 0,
897 };
898 if (mprq_en) {
899 struct mlx5dv_striding_rq_init_attr *mprq_attr =
900 &attr.wq.mlx5.striding_rq_attrs;
901
902 attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
903 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
904 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
905 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
906 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
907 };
908 }
909 tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
910 &attr.wq.mlx5);
911#else
912 tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
913#endif
914 if (tmpl->wq == NULL) {
915 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
916 dev->data->port_id, idx);
917 rte_errno = ENOMEM;
918 goto error;
919 }
920 /*
921 * Make sure number of WRs*SGEs match expectations since a queue
922 * cannot allocate more than "desc" buffers.
923 */
924 if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
925 attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
926 DRV_LOG(ERR,
927 "port %u Rx queue %u requested %u*%u but got %u*%u"
928 " WRs*SGEs",
929 dev->data->port_id, idx,
930 wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
931 attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
932 rte_errno = EINVAL;
933 goto error;
934 }
935 /* Change queue state to ready. */
936 mod = (struct ibv_wq_attr){
937 .attr_mask = IBV_WQ_ATTR_STATE,
938 .wq_state = IBV_WQS_RDY,
939 };
940 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
941 if (ret) {
942 DRV_LOG(ERR,
943 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
944 dev->data->port_id, idx);
945 rte_errno = ret;
946 goto error;
947 }
948 obj.cq.in = tmpl->cq;
949 obj.cq.out = &cq_info;
950 obj.rwq.in = tmpl->wq;
951 obj.rwq.out = &rwq;
952 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
953 if (ret) {
954 rte_errno = ret;
955 goto error;
956 }
957 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
958 DRV_LOG(ERR,
959 "port %u wrong MLX5_CQE_SIZE environment variable"
960 " value: it should be set to %u",
961 dev->data->port_id, RTE_CACHE_LINE_SIZE);
962 rte_errno = EINVAL;
963 goto error;
964 }
965 /* Fill the rings. */
966 rxq_data->wqes = rwq.buf;
967 for (i = 0; (i != wqe_n); ++i) {
968 volatile struct mlx5_wqe_data_seg *scat;
969 uintptr_t addr;
970 uint32_t byte_count;
971
972 if (mprq_en) {
973 struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
974
975 scat = &((volatile struct mlx5_wqe_mprq *)
976 rxq_data->wqes)[i].dseg;
977 addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
978 byte_count = (1 << rxq_data->strd_sz_n) *
979 (1 << rxq_data->strd_num_n);
980 } else {
981 struct rte_mbuf *buf = (*rxq_data->elts)[i];
982
983 scat = &((volatile struct mlx5_wqe_data_seg *)
984 rxq_data->wqes)[i];
985 addr = rte_pktmbuf_mtod(buf, uintptr_t);
986 byte_count = DATA_LEN(buf);
987 }
988 /* scat->addr must be able to store a pointer. */
989 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
990 *scat = (struct mlx5_wqe_data_seg){
991 .addr = rte_cpu_to_be_64(addr),
992 .byte_count = rte_cpu_to_be_32(byte_count),
993 .lkey = mlx5_rx_addr2mr(rxq_data, addr),
994 };
995 }
996 rxq_data->rq_db = rwq.dbrec;
997 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
998 rxq_data->cq_ci = 0;
999 rxq_data->consumed_strd = 0;
1000 rxq_data->rq_pi = 0;
1001 rxq_data->zip = (struct rxq_zip){
1002 .ai = 0,
1003 };
1004 rxq_data->cq_db = cq_info.dbrec;
1005 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1006 rxq_data->cq_uar = cq_info.cq_uar;
1007 rxq_data->cqn = cq_info.cqn;
1008 rxq_data->cq_arm_sn = 0;
1009 /* Update doorbell counter. */
1010 rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
1011 rte_wmb();
1012 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
1013 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1014 idx, (void *)&tmpl);
1015 rte_atomic32_inc(&tmpl->refcnt);
1016 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
1017 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1018 return tmpl;
1019error:
1020 ret = rte_errno; /* Save rte_errno before cleanup. */
1021 if (tmpl->wq)
1022 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1023 if (tmpl->cq)
1024 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1025 if (tmpl->channel)
1026 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
1027 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1028 rte_errno = ret; /* Restore rte_errno. */
1029 return NULL;
1030}
1031
1032/**
1033 * Get an Rx queue Verbs object.
1034 *
1035 * @param dev
1036 * Pointer to Ethernet device.
1037 * @param idx
1038 * Queue index in DPDK Rx queue array.
1039 *
1040 * @return
1041 * The Verbs object if it exists.
1042 */
1043struct mlx5_rxq_ibv *
1044mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
1045{
1046 struct mlx5_priv *priv = dev->data->dev_private;
1047 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1048 struct mlx5_rxq_ctrl *rxq_ctrl;
1049
1050 if (idx >= priv->rxqs_n)
1051 return NULL;
1052 if (!rxq_data)
1053 return NULL;
1054 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1055 if (rxq_ctrl->ibv) {
1056 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
1057 }
1058 return rxq_ctrl->ibv;
1059}
1060
1061/**
1062 * Release an Rx verbs queue object.
1063 *
1064 * @param rxq_ibv
1065 * Verbs Rx queue object.
1066 *
1067 * @return
1068 * 1 while a reference on it exists, 0 when freed.
1069 */
1070int
1071mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
1072{
1073 assert(rxq_ibv);
1074 assert(rxq_ibv->wq);
1075 assert(rxq_ibv->cq);
1076 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
1077 rxq_free_elts(rxq_ibv->rxq_ctrl);
1078 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
1079 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
1080 if (rxq_ibv->channel)
1081 claim_zero(mlx5_glue->destroy_comp_channel
1082 (rxq_ibv->channel));
1083 LIST_REMOVE(rxq_ibv, next);
1084 rte_free(rxq_ibv);
1085 return 0;
1086 }
1087 return 1;
1088}
1089
1090/**
1091 * Verify the Verbs Rx queue list is empty.
1092 *
1093 * @param dev
1094 * Pointer to Ethernet device.
1095 *
1096 * @return
1097 * The number of objects not released.
1098 */
1099int
1100mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
1101{
1102 struct mlx5_priv *priv = dev->data->dev_private;
1103 int ret = 0;
1104 struct mlx5_rxq_ibv *rxq_ibv;
1105
1106 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
1107 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
1108 dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
1109 ++ret;
1110 }
1111 return ret;
1112}
1113
1114/**
1115 * Return true if a single reference exists on the object.
1116 *
1117 * @param rxq_ibv
1118 * Verbs Rx queue object.
1119 */
1120int
1121mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
1122{
1123 assert(rxq_ibv);
1124 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
1125}
1126
1127/**
1128 * Callback function to initialize mbufs for Multi-Packet RQ.
1129 */
1130static inline void
1131mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
1132 void *_m, unsigned int i __rte_unused)
1133{
1134 struct mlx5_mprq_buf *buf = _m;
1135
1136 memset(_m, 0, sizeof(*buf));
1137 buf->mp = mp;
1138 rte_atomic16_set(&buf->refcnt, 1);
1139}
1140
1141/**
1142 * Free mempool of Multi-Packet RQ.
1143 *
1144 * @param dev
1145 * Pointer to Ethernet device.
1146 *
1147 * @return
1148 * 0 on success, negative errno value on failure.
1149 */
1150int
1151mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1152{
1153 struct mlx5_priv *priv = dev->data->dev_private;
1154 struct rte_mempool *mp = priv->mprq_mp;
1155 unsigned int i;
1156
1157 if (mp == NULL)
1158 return 0;
1159 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1160 dev->data->port_id, mp->name);
1161 /*
1162 * If a buffer in the pool has been externally attached to an mbuf and
1163 * is still in use by the application, destroying the Rx queue can spoil
1164 * the packet. It is unlikely to happen but can occur if the application
1165 * dynamically creates and destroys queues while holding Rx packets.
1166 *
1167 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1168 * RQ isn't provided by application but managed by PMD.
1169 */
1170 if (!rte_mempool_full(mp)) {
1171 DRV_LOG(ERR,
1172 "port %u mempool for Multi-Packet RQ is still in use",
1173 dev->data->port_id);
1174 rte_errno = EBUSY;
1175 return -rte_errno;
1176 }
1177 rte_mempool_free(mp);
1178 /* Unset mempool for each Rx queue. */
1179 for (i = 0; i != priv->rxqs_n; ++i) {
1180 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1181
1182 if (rxq == NULL)
1183 continue;
1184 rxq->mprq_mp = NULL;
1185 }
1186 priv->mprq_mp = NULL;
1187 return 0;
1188}
1189
1190/**
1191 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1192 * mempool. If already allocated, reuse it if there are enough elements.
1193 * Otherwise, resize it.
1194 *
1195 * @param dev
1196 * Pointer to Ethernet device.
1197 *
1198 * @return
1199 * 0 on success, negative errno value on failure.
1200 */
1201int
1202mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1203{
1204 struct mlx5_priv *priv = dev->data->dev_private;
1205 struct rte_mempool *mp = priv->mprq_mp;
1206 char name[RTE_MEMPOOL_NAMESIZE];
1207 unsigned int desc = 0;
1208 unsigned int buf_len;
1209 unsigned int obj_num;
1210 unsigned int obj_size;
1211 unsigned int strd_num_n = 0;
1212 unsigned int strd_sz_n = 0;
1213 unsigned int i;
1214
1215 if (!mlx5_mprq_enabled(dev))
1216 return 0;
1217 /* Count the total number of descriptors configured. */
1218 for (i = 0; i != priv->rxqs_n; ++i) {
1219 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1220
1221 if (rxq == NULL)
1222 continue;
1223 desc += 1 << rxq->elts_n;
1224 /* Get the max number of strides. */
1225 if (strd_num_n < rxq->strd_num_n)
1226 strd_num_n = rxq->strd_num_n;
1227 /* Get the max size of a stride. */
1228 if (strd_sz_n < rxq->strd_sz_n)
1229 strd_sz_n = rxq->strd_sz_n;
1230 }
1231 assert(strd_num_n && strd_sz_n);
1232 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1233 obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
1234 /*
1235 * Received packets can be either memcpy'd or externally referenced. In
1236 * case the packet is attached to an mbuf as an external buffer, it is
1237 * not possible to predict how the buffers will be queued by the
1238 * application, so there is no way to pre-allocate exactly the needed
1239 * buffers in advance; instead, enough buffers are prepared speculatively.
1240 *
1241 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1242 * received packets to buffers provided by application (rxq->mp) until
1243 * this Mempool gets available again.
1244 */
1245 desc *= 4;
1246 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1247 /*
1248 * rte_mempool_create_empty() has sanity check to refuse large cache
1249 * size compared to the number of elements.
1250 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1251 * constant number 2 instead.
1252 */
1253 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1254 /* Check whether a mempool is already allocated and if it can be reused. */
1255 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1256 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1257 dev->data->port_id, mp->name);
1258 /* Reuse. */
1259 goto exit;
1260 } else if (mp != NULL) {
1261 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1262 dev->data->port_id, mp->name);
1263 /*
1264 * If failed to free, which means it may be still in use, no way
1265 * but to keep using the existing one. On buffer underrun,
1266 * packets will be memcpy'd instead of external buffer
1267 * attachment.
1268 */
1269 if (mlx5_mprq_free_mp(dev)) {
1270 if (mp->elt_size >= obj_size)
1271 goto exit;
1272 else
1273 return -rte_errno;
1274 }
1275 }
1276 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1277 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1278 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
1279 dev->device->numa_node, 0);
1280 if (mp == NULL) {
1281 DRV_LOG(ERR,
1282 "port %u failed to allocate a mempool for"
1283 " Multi-Packet RQ, count=%u, size=%u",
1284 dev->data->port_id, obj_num, obj_size);
1285 rte_errno = ENOMEM;
1286 return -rte_errno;
1287 }
1288 priv->mprq_mp = mp;
1289exit:
1290 /* Set mempool for each Rx queue. */
1291 for (i = 0; i != priv->rxqs_n; ++i) {
1292 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1293
1294 if (rxq == NULL)
1295 continue;
1296 rxq->mprq_mp = mp;
1297 }
1298 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1299 dev->data->port_id);
1300 return 0;
1301}
1302
1303/**
1304 * Create a DPDK Rx queue.
1305 *
1306 * @param dev
1307 * Pointer to Ethernet device.
1308 * @param idx
1309 * RX queue index.
1310 * @param desc
1311 * Number of descriptors to configure in queue.
1312 * @param socket
1313 * NUMA socket on which memory must be allocated.
1314 *
1315 * @return
1316 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1317 */
1318struct mlx5_rxq_ctrl *
1319mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1320 unsigned int socket, const struct rte_eth_rxconf *conf,
1321 struct rte_mempool *mp)
1322{
1323 struct mlx5_priv *priv = dev->data->dev_private;
1324 struct mlx5_rxq_ctrl *tmpl;
1325 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1326 unsigned int mprq_stride_size;
1327 struct mlx5_dev_config *config = &priv->config;
1328 /*
1329 * Always allocate extra slots, even if eventually
1330 * the vector Rx will not be used.
1331 */
1332 uint16_t desc_n =
1333 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1334 uint64_t offloads = conf->offloads |
1335 dev->data->dev_conf.rxmode.offloads;
1336 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1337
1338 tmpl = rte_calloc_socket("RXQ", 1,
1339 sizeof(*tmpl) +
1340 desc_n * sizeof(struct rte_mbuf *),
1341 0, socket);
1342 if (!tmpl) {
1343 rte_errno = ENOMEM;
1344 return NULL;
1345 }
1346 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1347 MLX5_MR_BTREE_CACHE_N, socket)) {
1348 /* rte_errno is already set. */
1349 goto error;
1350 }
1351 tmpl->socket = socket;
1352 if (dev->data->dev_conf.intr_conf.rxq)
1353 tmpl->irq = 1;
1354 /*
1355 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1356 * following conditions are met:
1357 * - MPRQ is enabled.
1358 * - The number of descs is more than the number of strides.
1359 * - max_rx_pkt_len plus overhead is less than the max size of a
1360 * stride.
1361 * Otherwise, enable Rx scatter if necessary.
1362 */
1363 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
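 /* A stride must cover the max Rx packet, the mbuf headroom, and the shared info used for external buffer attachment. */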
1364 mprq_stride_size =
1365 dev->data->dev_conf.rxmode.max_rx_pkt_len +
1366 sizeof(struct rte_mbuf_ext_shared_info) +
1367 RTE_PKTMBUF_HEADROOM;
1368 if (mprq_en &&
1369 desc > (1U << config->mprq.stride_num_n) &&
1370 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1371 /* TODO: Rx scatter isn't supported yet. */
1372 tmpl->rxq.sges_n = 0;
1373 /* Trim the number of descs needed. */
1374 desc >>= config->mprq.stride_num_n;
1375 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1376 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1377 config->mprq.min_stride_size_n);
1378 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1379 tmpl->rxq.mprq_max_memcpy_len =
1380 RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
1381 config->mprq.max_memcpy_len);
1382 DRV_LOG(DEBUG,
1383 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1384 " strd_num_n = %u, strd_sz_n = %u",
1385 dev->data->port_id, idx,
1386 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1387 } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
1388 (mb_len - RTE_PKTMBUF_HEADROOM)) {
1389 tmpl->rxq.sges_n = 0;
1390 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1391 unsigned int size =
1392 RTE_PKTMBUF_HEADROOM +
1393 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1394 unsigned int sges_n;
1395
1396 /*
1397 * Determine the number of SGEs needed for a full packet
1398 * and round it to the next power of two.
1399 */
1400 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1401 tmpl->rxq.sges_n = sges_n;
1402 /* Make sure rxq.sges_n did not overflow. */
1403 size = mb_len * (1 << tmpl->rxq.sges_n);
1404 size -= RTE_PKTMBUF_HEADROOM;
1405 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1406 DRV_LOG(ERR,
1407 "port %u too many SGEs (%u) needed to handle"
1408 " requested maximum packet size %u",
1409 dev->data->port_id,
1410 1 << sges_n,
1411 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1412 rte_errno = EOVERFLOW;
1413 goto error;
1414 }
1415 } else {
1416 DRV_LOG(WARNING,
1417 "port %u the requested maximum Rx packet size (%u) is"
1418 " larger than a single mbuf (%u) and scattered mode has"
1419 " not been requested",
1420 dev->data->port_id,
1421 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1422 mb_len - RTE_PKTMBUF_HEADROOM);
1423 }
1424 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1425 DRV_LOG(WARNING,
1426 "port %u MPRQ is requested but cannot be enabled"
1427 " (requested: desc = %u, stride_sz = %u,"
1428 " supported: min_stride_num = %u, max_stride_sz = %u).",
1429 dev->data->port_id, desc, mprq_stride_size,
1430 (1 << config->mprq.stride_num_n),
1431 (1 << config->mprq.max_stride_size_n));
1432 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1433 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1434 if (desc % (1 << tmpl->rxq.sges_n)) {
1435 DRV_LOG(ERR,
1436 "port %u number of Rx queue descriptors (%u) is not a"
1437 " multiple of SGEs per packet (%u)",
1438 dev->data->port_id,
1439 desc,
1440 1 << tmpl->rxq.sges_n);
1441 rte_errno = EINVAL;
1442 goto error;
1443 }
1444 /* Toggle RX checksum offload if hardware supports it. */
1445 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1446 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1447 /* Configure VLAN stripping. */
1448 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1449 /* By default, FCS (CRC) is stripped by hardware. */
1450 tmpl->rxq.crc_present = 0;
1451 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1452 if (config->hw_fcs_strip) {
1453 tmpl->rxq.crc_present = 1;
1454 } else {
1455 DRV_LOG(WARNING,
1456 "port %u CRC stripping has been disabled but will"
1457 " still be performed by hardware, make sure MLNX_OFED"
1458 " and firmware are up to date",
1459 dev->data->port_id);
1460 }
1461 }
1462 DRV_LOG(DEBUG,
1463 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1464 " incoming frames to hide it",
1465 dev->data->port_id,
1466 tmpl->rxq.crc_present ? "disabled" : "enabled",
1467 tmpl->rxq.crc_present << 2);
1468 /* Save port ID. */
1469 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1470 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1471 tmpl->rxq.port_id = dev->data->port_id;
1472 tmpl->priv = priv;
1473 tmpl->rxq.mp = mp;
1474 tmpl->rxq.elts_n = log2above(desc);
1475 tmpl->rxq.rq_repl_thresh =
1476 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1477 tmpl->rxq.elts =
1478 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1479#ifndef RTE_ARCH_64
1480 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1481#endif
1482 tmpl->rxq.idx = idx;
1483 rte_atomic32_inc(&tmpl->refcnt);
1484 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1485 return tmpl;
1486error:
1487 rte_free(tmpl);
1488 return NULL;
1489}
1490
1491/**
1492 * Get a Rx queue.
1493 *
1494 * @param dev
1495 * Pointer to Ethernet device.
1496 * @param idx
1497 * RX queue index.
1498 *
1499 * @return
1500 * A pointer to the queue if it exists, NULL otherwise.
1501 */
1502struct mlx5_rxq_ctrl *
1503mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1504{
1505 struct mlx5_priv *priv = dev->data->dev_private;
1506 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1507
1508 if ((*priv->rxqs)[idx]) {
1509 rxq_ctrl = container_of((*priv->rxqs)[idx],
1510 struct mlx5_rxq_ctrl,
1511 rxq);
1512 mlx5_rxq_ibv_get(dev, idx);
1513 rte_atomic32_inc(&rxq_ctrl->refcnt);
1514 }
1515 return rxq_ctrl;
1516}
1517
1518/**
1519 * Release a Rx queue.
1520 *
1521 * @param dev
1522 * Pointer to Ethernet device.
1523 * @param idx
1524 * RX queue index.
1525 *
1526 * @return
1527 * 1 while a reference on it exists, 0 when freed.
1528 */
1529int
1530mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1531{
1532 struct mlx5_priv *priv = dev->data->dev_private;
1533 struct mlx5_rxq_ctrl *rxq_ctrl;
1534
1535 if (!(*priv->rxqs)[idx])
1536 return 0;
1537 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1538 assert(rxq_ctrl->priv);
1539 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1540 rxq_ctrl->ibv = NULL;
1541 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1542 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1543 LIST_REMOVE(rxq_ctrl, next);
1544 rte_free(rxq_ctrl);
1545 (*priv->rxqs)[idx] = NULL;
1546 return 0;
1547 }
1548 return 1;
1549}
1550
1551/**
1552 * Verify if the queue can be released.
1553 *
1554 * @param dev
1555 * Pointer to Ethernet device.
1556 * @param idx
1557 * RX queue index.
1558 *
1559 * @return
1560 * 1 if the queue can be released, negative errno otherwise and rte_errno is
1561 * set.
1562 */
1563int
1564mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1565{
1566 struct mlx5_priv *priv = dev->data->dev_private;
1567 struct mlx5_rxq_ctrl *rxq_ctrl;
1568
1569 if (!(*priv->rxqs)[idx]) {
1570 rte_errno = EINVAL;
1571 return -rte_errno;
1572 }
1573 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1574 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1575}
1576
1577/**
1578 * Verify the Rx queue list is empty.
1579 *
1580 * @param dev
1581 * Pointer to Ethernet device.
1582 *
1583 * @return
1584 * The number of objects not released.
1585 */
1586int
1587mlx5_rxq_verify(struct rte_eth_dev *dev)
1588{
1589 struct mlx5_priv *priv = dev->data->dev_private;
1590 struct mlx5_rxq_ctrl *rxq_ctrl;
1591 int ret = 0;
1592
1593 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1594 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1595 dev->data->port_id, rxq_ctrl->rxq.idx);
1596 ++ret;
1597 }
1598 return ret;
1599}
1600
1601/**
1602 * Create an indirection table.
1603 *
1604 * @param dev
1605 * Pointer to Ethernet device.
1606 * @param queues
1607 * Queues entering in the indirection table.
1608 * @param queues_n
1609 * Number of queues in the array.
1610 *
1611 * @return
1612 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1613 */
1614struct mlx5_ind_table_ibv *
1615mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1616 uint32_t queues_n)
1617{
1618 struct mlx5_priv *priv = dev->data->dev_private;
1619 struct mlx5_ind_table_ibv *ind_tbl;
1620 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1621 log2above(queues_n) :
1622 log2above(priv->config.ind_table_max_size);
1623 struct ibv_wq *wq[1 << wq_n];
1624 unsigned int i;
1625 unsigned int j;
1626
1627 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1628 queues_n * sizeof(uint16_t), 0);
1629 if (!ind_tbl) {
1630 rte_errno = ENOMEM;
1631 return NULL;
1632 }
1633 for (i = 0; i != queues_n; ++i) {
1634 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1635
1636 if (!rxq)
1637 goto error;
1638 wq[i] = rxq->ibv->wq;
1639 ind_tbl->queues[i] = queues[i];
1640 }
1641 ind_tbl->queues_n = queues_n;
1642 /* Finalise indirection table. */
1643 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1644 wq[i] = wq[j];
1645 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1646 (priv->sh->ctx,
1647 &(struct ibv_rwq_ind_table_init_attr){
1648 .log_ind_tbl_size = wq_n,
1649 .ind_tbl = wq,
1650 .comp_mask = 0,
1651 });
1652 if (!ind_tbl->ind_table) {
1653 rte_errno = errno;
1654 goto error;
1655 }
1656 rte_atomic32_inc(&ind_tbl->refcnt);
1657 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1658 return ind_tbl;
1659error:
1660 rte_free(ind_tbl);
1661 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1662 return NULL;
1663}
1664
1665/**
1666 * Get an indirection table.
1667 *
1668 * @param dev
1669 * Pointer to Ethernet device.
1670 * @param queues
1671 * Queues entering in the indirection table.
1672 * @param queues_n
1673 * Number of queues in the array.
1674 *
1675 * @return
1676 * An indirection table if found.
1677 */
1678struct mlx5_ind_table_ibv *
1679mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1680 uint32_t queues_n)
1681{
1682 struct mlx5_priv *priv = dev->data->dev_private;
1683 struct mlx5_ind_table_ibv *ind_tbl;
1684
1685 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1686 if ((ind_tbl->queues_n == queues_n) &&
1687 (memcmp(ind_tbl->queues, queues,
1688 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1689 == 0))
1690 break;
1691 }
1692 if (ind_tbl) {
1693 unsigned int i;
1694
1695 rte_atomic32_inc(&ind_tbl->refcnt);
1696 for (i = 0; i != ind_tbl->queues_n; ++i)
1697 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1698 }
1699 return ind_tbl;
1700}
1701
1702/**
1703 * Release an indirection table.
1704 *
1705 * @param dev
1706 * Pointer to Ethernet device.
1707 * @param ind_table
1708 * Indirection table to release.
1709 *
1710 * @return
1711 * 1 while a reference on it exists, 0 when freed.
1712 */
1713int
1714mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1715 struct mlx5_ind_table_ibv *ind_tbl)
1716{
1717 unsigned int i;
1718
1719 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1720 claim_zero(mlx5_glue->destroy_rwq_ind_table
1721 (ind_tbl->ind_table));
1722 for (i = 0; i != ind_tbl->queues_n; ++i)
1723 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1724 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1725 LIST_REMOVE(ind_tbl, next);
1726 rte_free(ind_tbl);
1727 return 0;
1728 }
1729 return 1;
1730}
1731
1732/**
1733 * Verify the Verbs indirection table list is empty.
1734 *
1735 * @param dev
1736 * Pointer to Ethernet device.
1737 *
1738 * @return
1739 * The number of objects not released.
1740 */
1741int
1742mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1743{
1744 struct mlx5_priv *priv = dev->data->dev_private;
1745 struct mlx5_ind_table_ibv *ind_tbl;
1746 int ret = 0;
1747
1748 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1749 DRV_LOG(DEBUG,
1750 "port %u Verbs indirection table %p still referenced",
1751 dev->data->port_id, (void *)ind_tbl);
1752 ++ret;
1753 }
1754 return ret;
1755}
1756
1757/**
1758 * Create an Rx Hash queue.
1759 *
1760 * @param dev
1761 * Pointer to Ethernet device.
1762 * @param rss_key
1763 * RSS key for the Rx hash queue.
1764 * @param rss_key_len
1765 * RSS key length.
1766 * @param hash_fields
1767 * Verbs protocol hash field to make the RSS on.
1768 * @param queues
1769 * Queues entering in hash queue. In case of empty hash_fields only the
1770 * first queue index will be taken for the indirection table.
1771 * @param queues_n
1772 * Number of queues.
1773 * @param tunnel
1774 * Tunnel type.
1775 *
1776 * @return
1777 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1778 */
1779struct mlx5_hrxq *
1780mlx5_hrxq_new(struct rte_eth_dev *dev,
1781 const uint8_t *rss_key, uint32_t rss_key_len,
1782 uint64_t hash_fields,
1783 const uint16_t *queues, uint32_t queues_n,
1784 int tunnel __rte_unused)
1785{
1786 struct mlx5_priv *priv = dev->data->dev_private;
1787 struct mlx5_hrxq *hrxq;
1788 struct mlx5_ind_table_ibv *ind_tbl;
1789 struct ibv_qp *qp;
1790#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1791 struct mlx5dv_qp_init_attr qp_init_attr;
1792#endif
1793 int err;
1794
1795 queues_n = hash_fields ? queues_n : 1;
1796 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1797 if (!ind_tbl)
1798 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1799 if (!ind_tbl) {
1800 rte_errno = ENOMEM;
1801 return NULL;
1802 }
1803#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1804 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
1805 if (tunnel) {
1806 qp_init_attr.comp_mask =
1807 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1808 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1809 }
1810#ifdef HAVE_IBV_FLOW_DV_SUPPORT
1811 if (dev->data->dev_conf.lpbk_mode) {
1812 /* Allow packet sent from NIC loop back w/o source MAC check. */
1813 qp_init_attr.comp_mask |=
1814 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1815 qp_init_attr.create_flags |=
1816 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
1817 }
1818#endif
1819 qp = mlx5_glue->dv_create_qp
1820 (priv->sh->ctx,
1821 &(struct ibv_qp_init_attr_ex){
1822 .qp_type = IBV_QPT_RAW_PACKET,
1823 .comp_mask =
1824 IBV_QP_INIT_ATTR_PD |
1825 IBV_QP_INIT_ATTR_IND_TABLE |
1826 IBV_QP_INIT_ATTR_RX_HASH,
1827 .rx_hash_conf = (struct ibv_rx_hash_conf){
1828 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1829 .rx_hash_key_len = rss_key_len,
1830 .rx_hash_key = (void *)(uintptr_t)rss_key,
1831 .rx_hash_fields_mask = hash_fields,
1832 },
1833 .rwq_ind_tbl = ind_tbl->ind_table,
1834 .pd = priv->sh->pd,
1835 },
1836 &qp_init_attr);
1837#else
1838 qp = mlx5_glue->create_qp_ex
1839 (priv->sh->ctx,
1840 &(struct ibv_qp_init_attr_ex){
1841 .qp_type = IBV_QPT_RAW_PACKET,
1842 .comp_mask =
1843 IBV_QP_INIT_ATTR_PD |
1844 IBV_QP_INIT_ATTR_IND_TABLE |
1845 IBV_QP_INIT_ATTR_RX_HASH,
1846 .rx_hash_conf = (struct ibv_rx_hash_conf){
1847 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1848 .rx_hash_key_len = rss_key_len,
1849 .rx_hash_key = (void *)(uintptr_t)rss_key,
1850 .rx_hash_fields_mask = hash_fields,
1851 },
1852 .rwq_ind_tbl = ind_tbl->ind_table,
1853 .pd = priv->sh->pd,
1854 });
1855#endif
1856 if (!qp) {
1857 rte_errno = errno;
1858 goto error;
1859 }
	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
	if (!hrxq) {
		/* rte_calloc() does not set rte_errno on failure. */
		rte_errno = ENOMEM;
		goto error;
	}
1863 hrxq->ind_table = ind_tbl;
1864 hrxq->qp = qp;
1865 hrxq->rss_key_len = rss_key_len;
1866 hrxq->hash_fields = hash_fields;
1867 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1868#ifdef HAVE_IBV_FLOW_DV_SUPPORT
1869 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
1870 if (!hrxq->action) {
1871 rte_errno = errno;
1872 goto error;
1873 }
1874#endif
1875 rte_atomic32_inc(&hrxq->refcnt);
1876 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1877 return hrxq;
1878error:
1879 err = rte_errno; /* Save rte_errno before cleanup. */
1880 mlx5_ind_table_ibv_release(dev, ind_tbl);
1881 if (qp)
1882 claim_zero(mlx5_glue->destroy_qp(qp));
1883 rte_errno = err; /* Restore rte_errno. */
1884 return NULL;
1885}
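
/*
 * Illustrative sketch (not part of the driver): creating a TCP/IPv4 hash Rx
 * queue with the default Toeplitz key defined at the top of this file.  The
 * queue list and the hash-field selection are hypothetical.
 *
 * @code
 *	const uint16_t queues[] = { 0, 1 };
 *	uint64_t fields = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *			  IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_new(dev, rss_hash_default_key, MLX5_RSS_HASH_KEY_LEN,
 *			     fields, queues, RTE_DIM(queues), 0);
 *	if (!hrxq)
 *		return -rte_errno;
 * @endcode
 */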
1886
1887/**
1888 * Get an Rx Hash queue.
1889 *
1890 * @param dev
1891 * Pointer to Ethernet device.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
1894 * @param queues
1895 * Queues entering the hash Rx queue. If hash_fields is empty, only the
1896 * first queue index is used for the indirection table.
1897 * @param queues_n
1898 * Number of queues.
1899 *
1900 * @return
1901 * A hash Rx queue on success, NULL otherwise.
1902 */
1903struct mlx5_hrxq *
1904mlx5_hrxq_get(struct rte_eth_dev *dev,
1905 const uint8_t *rss_key, uint32_t rss_key_len,
1906 uint64_t hash_fields,
1907 const uint16_t *queues, uint32_t queues_n)
1908{
1909 struct mlx5_priv *priv = dev->data->dev_private;
1910 struct mlx5_hrxq *hrxq;
1911
1912 queues_n = hash_fields ? queues_n : 1;
1913 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1914 struct mlx5_ind_table_ibv *ind_tbl;
1915
1916 if (hrxq->rss_key_len != rss_key_len)
1917 continue;
1918 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1919 continue;
1920 if (hrxq->hash_fields != hash_fields)
1921 continue;
1922 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1923 if (!ind_tbl)
1924 continue;
1925 if (ind_tbl != hrxq->ind_table) {
1926 mlx5_ind_table_ibv_release(dev, ind_tbl);
1927 continue;
1928 }
1929 rte_atomic32_inc(&hrxq->refcnt);
1930 return hrxq;
1931 }
1932 return NULL;
1933}
1934
1935/**
1936 * Release the hash Rx queue.
1937 *
1938 * @param dev
1939 * Pointer to Ethernet device.
1940 * @param hrxq
1941 * Pointer to Hash Rx queue to release.
1942 *
1943 * @return
1944 * 1 while a reference on it exists, 0 when freed.
1945 */
1946int
1947mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1948{
1949 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1950#ifdef HAVE_IBV_FLOW_DV_SUPPORT
1951 mlx5_glue->destroy_flow_action(hrxq->action);
1952#endif
1953 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1954 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1955 LIST_REMOVE(hrxq, next);
1956 rte_free(hrxq);
1957 return 0;
1958 }
1959 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1960 return 1;
1961}
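
/*
 * Illustrative sketch (not part of the driver): the lookup-or-create pattern
 * a caller is expected to follow, releasing the reference once the hash Rx
 * queue is no longer needed.  The fields/queues variables are the same
 * hypothetical ones as in the sketch after mlx5_hrxq_new().
 *
 * @code
 *	hrxq = mlx5_hrxq_get(dev, rss_hash_default_key, MLX5_RSS_HASH_KEY_LEN,
 *			     fields, queues, RTE_DIM(queues));
 *	if (!hrxq)
 *		hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
 *				     MLX5_RSS_HASH_KEY_LEN, fields,
 *				     queues, RTE_DIM(queues), 0);
 *	if (!hrxq)
 *		return -rte_errno;
 *	... attach hrxq->qp (or hrxq->action) to a flow ...
 *	mlx5_hrxq_release(dev, hrxq);
 * @endcode
 */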
1962
1963/**
1964 * Verify the Verbs hash Rx queue list is empty.
1965 *
1966 * @param dev
1967 * Pointer to Ethernet device.
1968 *
1969 * @return
1970 * The number of objects not released.
1971 */
1972int
1973mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1974{
1975 struct mlx5_priv *priv = dev->data->dev_private;
1976 struct mlx5_hrxq *hrxq;
1977 int ret = 0;
1978
1979 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1980 DRV_LOG(DEBUG,
1981 "port %u Verbs hash Rx queue %p still referenced",
1982 dev->data->port_id, (void *)hrxq);
1983 ++ret;
1984 }
1985 return ret;
1986}
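
/*
 * Illustrative sketch (not part of the driver): both verify helpers are
 * intended as leak checks at device close time; a non-zero total means some
 * hash Rx queue or indirection table is still referenced by the caller.
 *
 * @code
 *	int leaks = mlx5_hrxq_ibv_verify(dev) + mlx5_ind_table_ibv_verify(dev);
 *
 *	if (leaks)
 *		DRV_LOG(WARNING, "port %u %d Rx object(s) still referenced",
 *			dev->data->port_id, leaks);
 * @endcode
 */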
1987
1988/**
1989 * Create a drop Rx queue Verbs object.
1990 *
1991 * @param dev
1992 * Pointer to Ethernet device.
1993 *
1994 * @return
1995 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1996 */
1997struct mlx5_rxq_ibv *
1998mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
1999{
2000 struct mlx5_priv *priv = dev->data->dev_private;
2001 struct ibv_context *ctx = priv->sh->ctx;
2002 struct ibv_cq *cq;
2003 struct ibv_wq *wq = NULL;
2004 struct mlx5_rxq_ibv *rxq;
2005
2006 if (priv->drop_queue.rxq)
2007 return priv->drop_queue.rxq;
2008 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2009 if (!cq) {
2010 DEBUG("port %u cannot allocate CQ for drop queue",
2011 dev->data->port_id);
2012 rte_errno = errno;
2013 goto error;
2014 }
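	/*
	 * A minimal work queue (one WR with one SGE) is enough here: no
	 * receive buffers are ever posted on the drop queue, so packets
	 * steered to it are simply discarded.
	 */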
2015 wq = mlx5_glue->create_wq(ctx,
2016 &(struct ibv_wq_init_attr){
2017 .wq_type = IBV_WQT_RQ,
2018 .max_wr = 1,
2019 .max_sge = 1,
2020 .pd = priv->sh->pd,
2021 .cq = cq,
2022 });
2023 if (!wq) {
2024 DEBUG("port %u cannot allocate WQ for drop queue",
2025 dev->data->port_id);
2026 rte_errno = errno;
2027 goto error;
2028 }
2029 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2030 if (!rxq) {
2031 DEBUG("port %u cannot allocate drop Rx queue memory",
2032 dev->data->port_id);
2033 rte_errno = ENOMEM;
2034 goto error;
2035 }
2036 rxq->cq = cq;
2037 rxq->wq = wq;
2038 priv->drop_queue.rxq = rxq;
2039 return rxq;
2040error:
2041 if (wq)
2042 claim_zero(mlx5_glue->destroy_wq(wq));
2043 if (cq)
2044 claim_zero(mlx5_glue->destroy_cq(cq));
2045 return NULL;
2046}
2047
2048/**
2049 * Release a drop Rx queue Verbs object.
2050 *
2051 * @param dev
2052 * Pointer to Ethernet device.
2056 */
2057void
2058mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
2059{
2060 struct mlx5_priv *priv = dev->data->dev_private;
2061 struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
2062
2063 if (rxq->wq)
2064 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2065 if (rxq->cq)
2066 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2067 rte_free(rxq);
2068 priv->drop_queue.rxq = NULL;
2069}
2070
2071/**
2072 * Create a drop indirection table.
2073 *
2074 * @param dev
2075 * Pointer to Ethernet device.
2076 *
2077 * @return
2078 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2079 */
2080struct mlx5_ind_table_ibv *
2081mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
2082{
2083 struct mlx5_priv *priv = dev->data->dev_private;
2084 struct mlx5_ind_table_ibv *ind_tbl;
2085 struct mlx5_rxq_ibv *rxq;
2086 struct mlx5_ind_table_ibv tmpl;
2087
2088 rxq = mlx5_rxq_ibv_drop_new(dev);
2089 if (!rxq)
2090 return NULL;
2091 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2092 (priv->sh->ctx,
2093 &(struct ibv_rwq_ind_table_init_attr){
2094 .log_ind_tbl_size = 0,
2095 .ind_tbl = &rxq->wq,
2096 .comp_mask = 0,
2097 });
2098 if (!tmpl.ind_table) {
2099 DEBUG("port %u cannot allocate indirection table for drop"
2100 " queue",
2101 dev->data->port_id);
2102 rte_errno = errno;
2103 goto error;
2104 }
2105 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2106 if (!ind_tbl) {
2107 rte_errno = ENOMEM;
2108 goto error;
2109 }
2110 ind_tbl->ind_table = tmpl.ind_table;
2111 return ind_tbl;
error:
	/* Destroy the indirection table if it was created before the failure. */
	if (tmpl.ind_table)
		claim_zero(mlx5_glue->destroy_rwq_ind_table(tmpl.ind_table));
	mlx5_rxq_ibv_drop_release(dev);
	return NULL;
2115}
2116
2117/**
2118 * Release a drop indirection table.
2119 *
2120 * @param dev
2121 * Pointer to Ethernet device.
2122 */
2123void
2124mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
2125{
2126 struct mlx5_priv *priv = dev->data->dev_private;
2127 struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
2128
2129 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2130 mlx5_rxq_ibv_drop_release(dev);
2131 rte_free(ind_tbl);
2132 priv->drop_queue.hrxq->ind_table = NULL;
2133}
2134
2135/**
2136 * Create a drop Rx Hash queue.
2137 *
2138 * @param dev
2139 * Pointer to Ethernet device.
2140 *
2141 * @return
2142 * The Verbs object initialised, NULL otherwise and rte_errno is set.
2143 */
2144struct mlx5_hrxq *
2145mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2146{
2147 struct mlx5_priv *priv = dev->data->dev_private;
2148 struct mlx5_ind_table_ibv *ind_tbl;
2149 struct ibv_qp *qp;
2150 struct mlx5_hrxq *hrxq;
2151
2152 if (priv->drop_queue.hrxq) {
2153 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2154 return priv->drop_queue.hrxq;
2155 }
2156 ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
2157 if (!ind_tbl)
2158 return NULL;
2159 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2160 &(struct ibv_qp_init_attr_ex){
2161 .qp_type = IBV_QPT_RAW_PACKET,
2162 .comp_mask =
2163 IBV_QP_INIT_ATTR_PD |
2164 IBV_QP_INIT_ATTR_IND_TABLE |
2165 IBV_QP_INIT_ATTR_RX_HASH,
2166 .rx_hash_conf = (struct ibv_rx_hash_conf){
2167 .rx_hash_function =
2168 IBV_RX_HASH_FUNC_TOEPLITZ,
2169 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2170 .rx_hash_key = rss_hash_default_key,
2171 .rx_hash_fields_mask = 0,
2172 },
2173 .rwq_ind_tbl = ind_tbl->ind_table,
2174 .pd = priv->sh->pd
2175 });
2176 if (!qp) {
2177 DEBUG("port %u cannot allocate QP for drop queue",
2178 dev->data->port_id);
2179 rte_errno = errno;
2180 goto error;
2181 }
2182 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2183 if (!hrxq) {
2184 DRV_LOG(WARNING,
2185 "port %u cannot allocate memory for drop queue",
2186 dev->data->port_id);
2187 rte_errno = ENOMEM;
2188 goto error;
2189 }
2190 hrxq->ind_table = ind_tbl;
2191 hrxq->qp = qp;
2192#ifdef HAVE_IBV_FLOW_DV_SUPPORT
2193 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2194 if (!hrxq->action) {
2195 rte_errno = errno;
2196 goto error;
2197 }
2198#endif
2199 priv->drop_queue.hrxq = hrxq;
2200 rte_atomic32_set(&hrxq->refcnt, 1);
2201 return hrxq;
error:
	if (ind_tbl) {
		/* priv->drop_queue.hrxq is not set yet; release directly. */
		claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
		mlx5_rxq_ibv_drop_release(dev);
		rte_free(ind_tbl);
	}
	return NULL;
2206}
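
/*
 * Illustrative sketch (not part of the driver): acquiring the shared drop
 * hash Rx queue for a flow rule that discards traffic, then dropping the
 * reference when the rule is destroyed.
 *
 * @code
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (!drop)
 *		return -rte_errno;
 *	... use drop->qp (or drop->action) as the flow destination ...
 *	mlx5_hrxq_drop_release(dev);
 * @endcode
 */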
2207
2208/**
2209 * Release a drop hash Rx queue.
2210 *
2211 * @param dev
2212 * Pointer to Ethernet device.
2213 */
2214void
2215mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2216{
2217 struct mlx5_priv *priv = dev->data->dev_private;
2218 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2219
2220 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2221#ifdef HAVE_IBV_FLOW_DV_SUPPORT
2222 mlx5_glue->destroy_flow_action(hrxq->action);
2223#endif
2224 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2225 mlx5_ind_table_ibv_drop_release(dev);
2226 rte_free(hrxq);
2227 priv->drop_queue.hrxq = NULL;
2228 }
11fdf7f2 2229}