ceph/src/spdk/dpdk/drivers/net/mlx5/mlx5_rxq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
4 */
5
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <fcntl.h>
11 #include <sys/queue.h>
12
13 /* Verbs header. */
14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
15 #ifdef PEDANTIC
16 #pragma GCC diagnostic ignored "-Wpedantic"
17 #endif
18 #include <infiniband/verbs.h>
19 #include <infiniband/mlx5dv.h>
20 #ifdef PEDANTIC
21 #pragma GCC diagnostic error "-Wpedantic"
22 #endif
23
24 #include <rte_mbuf.h>
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
28 #include <rte_interrupts.h>
29 #include <rte_debug.h>
30 #include <rte_io.h>
31
32 #include <mlx5_glue.h>
33 #include <mlx5_devx_cmds.h>
34
35 #include "mlx5_defs.h"
36 #include "mlx5.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_utils.h"
39 #include "mlx5_autoconf.h"
40 #include "mlx5_flow.h"
41
42
43 /* Default RSS hash key also used for ConnectX-3. */
44 uint8_t rss_hash_default_key[] = {
45 0x2c, 0xc6, 0x81, 0xd1,
46 0x5b, 0xdb, 0xf4, 0xf7,
47 0xfc, 0xa2, 0x83, 0x19,
48 0xdb, 0x1a, 0x3e, 0x94,
49 0x6b, 0x9e, 0x38, 0xd9,
50 0x2c, 0x9c, 0x03, 0xd1,
51 0xad, 0x99, 0x44, 0xa7,
52 0xd9, 0x56, 0x3d, 0x59,
53 0x06, 0x3c, 0x25, 0xf3,
54 0xfc, 0x1f, 0xdc, 0x2a,
55 };
56
57 /* Length of the default RSS hash key. */
58 static_assert(MLX5_RSS_HASH_KEY_LEN ==
59 (unsigned int)sizeof(rss_hash_default_key),
60 "wrong RSS default key size.");
61
62 /**
63 * Check whether Multi-Packet RQ can be enabled for the device.
64 *
65 * @param dev
66 * Pointer to Ethernet device.
67 *
68 * @return
69 * 1 if supported, negative errno value if not.
70 */
71 inline int
72 mlx5_check_mprq_support(struct rte_eth_dev *dev)
73 {
74 struct mlx5_priv *priv = dev->data->dev_private;
75
76 if (priv->config.mprq.enabled &&
77 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
78 return 1;
79 return -ENOTSUP;
80 }
81
82 /**
83 * Check whether Multi-Packet RQ is enabled for the Rx queue.
84 *
85 * @param rxq
86 * Pointer to receive queue structure.
87 *
88 * @return
89 * 0 if disabled, otherwise enabled.
90 */
91 inline int
92 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
93 {
94 return rxq->strd_num_n > 0;
95 }
96
97 /**
98 * Check whether Multi-Packet RQ is enabled for the device.
99 *
100 * @param dev
101 * Pointer to Ethernet device.
102 *
103 * @return
104 * 0 if disabled, otherwise enabled.
105 */
106 inline int
107 mlx5_mprq_enabled(struct rte_eth_dev *dev)
108 {
109 struct mlx5_priv *priv = dev->data->dev_private;
110 uint16_t i;
111 uint16_t n = 0;
112 uint16_t n_ibv = 0;
113
114 if (mlx5_check_mprq_support(dev) < 0)
115 return 0;
116 /* All the configured queues should be enabled. */
117 for (i = 0; i < priv->rxqs_n; ++i) {
118 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
119 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
120 (rxq, struct mlx5_rxq_ctrl, rxq);
121
122 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
123 continue;
124 n_ibv++;
125 if (mlx5_rxq_mprq_enabled(rxq))
126 ++n;
127 }
128 /* Multi-Packet RQ can't be partially configured. */
129 MLX5_ASSERT(n == 0 || n == n_ibv);
130 return n == n_ibv;
131 }
132
133 /**
134 * Allocate RX queue elements for Multi-Packet RQ.
135 *
136 * @param rxq_ctrl
137 * Pointer to RX queue structure.
138 *
139 * @return
140 * 0 on success, a negative errno value otherwise and rte_errno is set.
141 */
142 static int
143 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
144 {
145 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
146 unsigned int wqe_n = 1 << rxq->elts_n;
147 unsigned int i;
148 int err;
149
150 /* Iterate on segments. */
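/*
 * Note: the loop below runs wqe_n + 1 times; the extra buffer allocated
 * on the last iteration (i == wqe_n) is kept aside in mprq_repl as a
 * spare replacement buffer.
 */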
151 for (i = 0; i <= wqe_n; ++i) {
152 struct mlx5_mprq_buf *buf;
153
154 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
155 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
156 rte_errno = ENOMEM;
157 goto error;
158 }
159 if (i < wqe_n)
160 (*rxq->mprq_bufs)[i] = buf;
161 else
162 rxq->mprq_repl = buf;
163 }
164 DRV_LOG(DEBUG,
165 "port %u Rx queue %u allocated and configured %u segments",
166 rxq->port_id, rxq->idx, wqe_n);
167 return 0;
168 error:
169 err = rte_errno; /* Save rte_errno before cleanup. */
170 wqe_n = i;
171 for (i = 0; (i != wqe_n); ++i) {
172 if ((*rxq->mprq_bufs)[i] != NULL)
173 rte_mempool_put(rxq->mprq_mp,
174 (*rxq->mprq_bufs)[i]);
175 (*rxq->mprq_bufs)[i] = NULL;
176 }
177 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
178 rxq->port_id, rxq->idx);
179 rte_errno = err; /* Restore rte_errno. */
180 return -rte_errno;
181 }
182
183 /**
184 * Allocate RX queue elements for Single-Packet RQ.
185 *
186 * @param rxq_ctrl
187 * Pointer to RX queue structure.
188 *
189 * @return
190 * 0 on success, errno value on failure.
191 */
192 static int
193 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
194 {
195 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
196 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
197 unsigned int i;
198 int err;
199
200 /* Iterate on segments. */
201 for (i = 0; (i != elts_n); ++i) {
202 struct rte_mbuf *buf;
203
204 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
205 if (buf == NULL) {
206 DRV_LOG(ERR, "port %u empty mbuf pool",
207 PORT_ID(rxq_ctrl->priv));
208 rte_errno = ENOMEM;
209 goto error;
210 }
211 /* Headroom is reserved by rte_pktmbuf_alloc(). */
212 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
213 /* Buffer is supposed to be empty. */
214 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
215 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
216 MLX5_ASSERT(!buf->next);
217 /* Only the first segment keeps headroom. */
218 if (i % sges_n)
219 SET_DATA_OFF(buf, 0);
220 PORT(buf) = rxq_ctrl->rxq.port_id;
221 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
222 PKT_LEN(buf) = DATA_LEN(buf);
223 NB_SEGS(buf) = 1;
224 (*rxq_ctrl->rxq.elts)[i] = buf;
225 }
226 /* If Rx vector is activated. */
227 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
228 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
229 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
230 struct rte_pktmbuf_pool_private *priv =
231 (struct rte_pktmbuf_pool_private *)
232 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
233 int j;
234
235 /* Initialize default rearm_data for vPMD. */
236 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
237 rte_mbuf_refcnt_set(mbuf_init, 1);
238 mbuf_init->nb_segs = 1;
239 mbuf_init->port = rxq->port_id;
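/*
 * When the pool provides pinned external buffers, mark the template
 * mbuf so that rearmed mbufs keep the EXT_ATTACHED_MBUF flag.
 */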
240 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
241 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
242 /*
243 * prevent compiler reordering:
244 * rearm_data covers previous fields.
245 */
246 rte_compiler_barrier();
247 rxq->mbuf_initializer =
248 *(rte_xmm_t *)&mbuf_init->rearm_data;
249 /* Padding with a fake mbuf for vectorized Rx. */
250 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
251 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
252 }
253 DRV_LOG(DEBUG,
254 "port %u Rx queue %u allocated and configured %u segments"
255 " (max %u packets)",
256 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
257 elts_n / (1 << rxq_ctrl->rxq.sges_n));
258 return 0;
259 error:
260 err = rte_errno; /* Save rte_errno before cleanup. */
261 elts_n = i;
262 for (i = 0; (i != elts_n); ++i) {
263 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
264 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
265 (*rxq_ctrl->rxq.elts)[i] = NULL;
266 }
267 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
268 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
269 rte_errno = err; /* Restore rte_errno. */
270 return -rte_errno;
271 }
272
273 /**
274 * Allocate RX queue elements.
275 *
276 * @param rxq_ctrl
277 * Pointer to RX queue structure.
278 *
279 * @return
280 * 0 on success, errno value on failure.
281 */
282 int
283 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
284 {
285 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
286 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
287 }
288
289 /**
290 * Free RX queue elements for Multi-Packet RQ.
291 *
292 * @param rxq_ctrl
293 * Pointer to RX queue structure.
294 */
295 static void
296 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
297 {
298 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
299 uint16_t i;
300
301 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
302 rxq->port_id, rxq->idx);
303 if (rxq->mprq_bufs == NULL)
304 return;
305 MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
306 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
307 if ((*rxq->mprq_bufs)[i] != NULL)
308 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
309 (*rxq->mprq_bufs)[i] = NULL;
310 }
311 if (rxq->mprq_repl != NULL) {
312 mlx5_mprq_buf_free(rxq->mprq_repl);
313 rxq->mprq_repl = NULL;
314 }
315 }
316
317 /**
318 * Free RX queue elements for Single-Packet RQ.
319 *
320 * @param rxq_ctrl
321 * Pointer to RX queue structure.
322 */
323 static void
324 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
325 {
326 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
327 const uint16_t q_n = (1 << rxq->elts_n);
328 const uint16_t q_mask = q_n - 1;
329 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
330 uint16_t i;
331
332 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
333 PORT_ID(rxq_ctrl->priv), rxq->idx);
334 if (rxq->elts == NULL)
335 return;
336 /**
337 	 * Some mbufs in the ring belong to the application; they cannot be
338 	 * freed.
339 */
340 if (mlx5_rxq_check_vec_support(rxq) > 0) {
341 for (i = 0; i < used; ++i)
342 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
343 rxq->rq_pi = rxq->rq_ci;
344 }
345 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
346 if ((*rxq->elts)[i] != NULL)
347 rte_pktmbuf_free_seg((*rxq->elts)[i]);
348 (*rxq->elts)[i] = NULL;
349 }
350 }
351
352 /**
353 * Free RX queue elements.
354 *
355 * @param rxq_ctrl
356 * Pointer to RX queue structure.
357 */
358 static void
359 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
360 {
361 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
362 rxq_free_elts_mprq(rxq_ctrl);
363 else
364 rxq_free_elts_sprq(rxq_ctrl);
365 }
366
367 /**
368 * Returns the per-queue supported offloads.
369 *
370 * @param dev
371 * Pointer to Ethernet device.
372 *
373 * @return
374 * Supported Rx offloads.
375 */
376 uint64_t
377 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
378 {
379 struct mlx5_priv *priv = dev->data->dev_private;
380 struct mlx5_dev_config *config = &priv->config;
381 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
382 DEV_RX_OFFLOAD_TIMESTAMP |
383 DEV_RX_OFFLOAD_JUMBO_FRAME |
384 DEV_RX_OFFLOAD_RSS_HASH);
385
386 if (config->hw_fcs_strip)
387 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
388
389 if (config->hw_csum)
390 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
391 DEV_RX_OFFLOAD_UDP_CKSUM |
392 DEV_RX_OFFLOAD_TCP_CKSUM);
393 if (config->hw_vlan_strip)
394 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
395 if (MLX5_LRO_SUPPORTED(dev))
396 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
397 return offloads;
398 }
399
400
401 /**
402 * Returns the per-port supported offloads.
403 *
404 * @return
405 * Supported Rx offloads.
406 */
407 uint64_t
408 mlx5_get_rx_port_offloads(void)
409 {
410 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
411
412 return offloads;
413 }
414
415 /**
416 * Verify if the queue can be released.
417 *
418 * @param dev
419 * Pointer to Ethernet device.
420 * @param idx
421 * RX queue index.
422 *
423 * @return
424  *   1 if the queue can be released,
425  *   0 if the queue cannot be released (there are still references to it),
426  *   negative errno (rte_errno is set) if the queue does not exist.
427 */
428 static int
429 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
430 {
431 struct mlx5_priv *priv = dev->data->dev_private;
432 struct mlx5_rxq_ctrl *rxq_ctrl;
433
434 if (!(*priv->rxqs)[idx]) {
435 rte_errno = EINVAL;
436 return -rte_errno;
437 }
438 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
439 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
440 }
441
442 /**
443 * Rx queue presetup checks.
444 *
445 * @param dev
446 * Pointer to Ethernet device structure.
447 * @param idx
448 * RX queue index.
449 * @param desc
450 * Number of descriptors to configure in queue.
451 *
452 * @return
453 * 0 on success, a negative errno value otherwise and rte_errno is set.
454 */
455 static int
456 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
457 {
458 struct mlx5_priv *priv = dev->data->dev_private;
459
460 if (!rte_is_power_of_2(desc)) {
461 desc = 1 << log2above(desc);
462 DRV_LOG(WARNING,
463 "port %u increased number of descriptors in Rx queue %u"
464 " to the next power of two (%d)",
465 dev->data->port_id, idx, desc);
466 }
467 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
468 dev->data->port_id, idx, desc);
469 if (idx >= priv->rxqs_n) {
470 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
471 dev->data->port_id, idx, priv->rxqs_n);
472 rte_errno = EOVERFLOW;
473 return -rte_errno;
474 }
475 if (!mlx5_rxq_releasable(dev, idx)) {
476 DRV_LOG(ERR, "port %u unable to release queue index %u",
477 dev->data->port_id, idx);
478 rte_errno = EBUSY;
479 return -rte_errno;
480 }
481 mlx5_rxq_release(dev, idx);
482 return 0;
483 }
484
485 /**
486  * DPDK callback to configure a Rx queue.
487 * @param dev
488 * Pointer to Ethernet device structure.
489 * @param idx
490 * RX queue index.
491 * @param desc
492 * Number of descriptors to configure in queue.
493 * @param socket
494 * NUMA socket on which memory must be allocated.
495 * @param[in] conf
496 * Thresholds parameters.
497 * @param mp
498 * Memory pool for buffer allocations.
499 *
500 * @return
501 * 0 on success, a negative errno value otherwise and rte_errno is set.
502 */
503 int
504 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
505 unsigned int socket, const struct rte_eth_rxconf *conf,
506 struct rte_mempool *mp)
507 {
508 struct mlx5_priv *priv = dev->data->dev_private;
509 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
510 struct mlx5_rxq_ctrl *rxq_ctrl =
511 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
512 int res;
513
514 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
515 if (res)
516 return res;
517 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
518 if (!rxq_ctrl) {
519 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
520 dev->data->port_id, idx);
521 rte_errno = ENOMEM;
522 return -rte_errno;
523 }
524 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
525 dev->data->port_id, idx);
526 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
527 return 0;
528 }
529
530 /**
531  * DPDK callback to configure a Rx hairpin queue.
532 * @param dev
533 * Pointer to Ethernet device structure.
534 * @param idx
535 * RX queue index.
536 * @param desc
537 * Number of descriptors to configure in queue.
538 * @param hairpin_conf
539 * Hairpin configuration parameters.
540 *
541 * @return
542 * 0 on success, a negative errno value otherwise and rte_errno is set.
543 */
544 int
545 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
546 uint16_t desc,
547 const struct rte_eth_hairpin_conf *hairpin_conf)
548 {
549 struct mlx5_priv *priv = dev->data->dev_private;
550 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
551 struct mlx5_rxq_ctrl *rxq_ctrl =
552 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
553 int res;
554
555 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
556 if (res)
557 return res;
558 if (hairpin_conf->peer_count != 1 ||
559 hairpin_conf->peers[0].port != dev->data->port_id ||
560 hairpin_conf->peers[0].queue >= priv->txqs_n) {
561 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
562 " invalid hairpind configuration", dev->data->port_id,
563 idx);
564 rte_errno = EINVAL;
565 return -rte_errno;
566 }
567 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
568 if (!rxq_ctrl) {
569 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
570 dev->data->port_id, idx);
571 rte_errno = ENOMEM;
572 return -rte_errno;
573 }
574 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
575 dev->data->port_id, idx);
576 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
577 return 0;
578 }
579
580 /**
581 * DPDK callback to release a RX queue.
582 *
583 * @param dpdk_rxq
584 * Generic RX queue pointer.
585 */
586 void
587 mlx5_rx_queue_release(void *dpdk_rxq)
588 {
589 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
590 struct mlx5_rxq_ctrl *rxq_ctrl;
591 struct mlx5_priv *priv;
592
593 if (rxq == NULL)
594 return;
595 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
596 priv = rxq_ctrl->priv;
597 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
598 rte_panic("port %u Rx queue %u is still used by a flow and"
599 " cannot be removed\n",
600 PORT_ID(priv), rxq->idx);
601 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
602 }
603
604 /**
605 * Get an Rx queue Verbs/DevX object.
606 *
607 * @param dev
608 * Pointer to Ethernet device.
609 * @param idx
610 * Queue index in DPDK Rx queue array
611 *
612 * @return
613 * The Verbs/DevX object if it exists.
614 */
615 static struct mlx5_rxq_obj *
616 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
617 {
618 struct mlx5_priv *priv = dev->data->dev_private;
619 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
620 struct mlx5_rxq_ctrl *rxq_ctrl;
621
622 if (idx >= priv->rxqs_n)
623 return NULL;
624 if (!rxq_data)
625 return NULL;
626 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
627 if (rxq_ctrl->obj)
628 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
629 return rxq_ctrl->obj;
630 }
631
632 /**
633 * Release the resources allocated for an RQ DevX object.
634 *
635 * @param rxq_ctrl
636 * DevX Rx queue object.
637 */
638 static void
639 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
640 {
641 if (rxq_ctrl->rxq.wqes) {
642 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
643 rxq_ctrl->rxq.wqes = NULL;
644 }
645 if (rxq_ctrl->wq_umem) {
646 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
647 rxq_ctrl->wq_umem = NULL;
648 }
649 }
650
651 /**
652  * Release Rx hairpin related resources.
653 *
654 * @param rxq_obj
655 * Hairpin Rx queue object.
656 */
657 static void
658 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
659 {
660 struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
661
662 MLX5_ASSERT(rxq_obj);
663 rq_attr.state = MLX5_RQC_STATE_RST;
664 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
665 mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
666 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
667 }
668
669 /**
670 * Release an Rx verbs/DevX queue object.
671 *
672 * @param rxq_obj
673 * Verbs/DevX Rx queue object.
674 *
675 * @return
676 * 1 while a reference on it exists, 0 when freed.
677 */
678 static int
679 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
680 {
681 MLX5_ASSERT(rxq_obj);
682 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
683 switch (rxq_obj->type) {
684 case MLX5_RXQ_OBJ_TYPE_IBV:
685 MLX5_ASSERT(rxq_obj->wq);
686 MLX5_ASSERT(rxq_obj->cq);
687 rxq_free_elts(rxq_obj->rxq_ctrl);
688 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
689 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
690 break;
691 case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
692 MLX5_ASSERT(rxq_obj->cq);
693 MLX5_ASSERT(rxq_obj->rq);
694 rxq_free_elts(rxq_obj->rxq_ctrl);
695 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
696 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
697 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
698 break;
699 case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
700 MLX5_ASSERT(rxq_obj->rq);
701 rxq_obj_hairpin_release(rxq_obj);
702 break;
703 }
704 if (rxq_obj->channel)
705 claim_zero(mlx5_glue->destroy_comp_channel
706 (rxq_obj->channel));
707 LIST_REMOVE(rxq_obj, next);
708 rte_free(rxq_obj);
709 return 0;
710 }
711 return 1;
712 }
713
714 /**
715 * Allocate queue vector and fill epoll fd list for Rx interrupts.
716 *
717 * @param dev
718 * Pointer to Ethernet device.
719 *
720 * @return
721 * 0 on success, a negative errno value otherwise and rte_errno is set.
722 */
723 int
724 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
725 {
726 struct mlx5_priv *priv = dev->data->dev_private;
727 unsigned int i;
728 unsigned int rxqs_n = priv->rxqs_n;
729 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
730 unsigned int count = 0;
731 struct rte_intr_handle *intr_handle = dev->intr_handle;
732
733 if (!dev->data->dev_conf.intr_conf.rxq)
734 return 0;
735 mlx5_rx_intr_vec_disable(dev);
736 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
737 if (intr_handle->intr_vec == NULL) {
738 DRV_LOG(ERR,
739 "port %u failed to allocate memory for interrupt"
740 " vector, Rx interrupts will not be supported",
741 dev->data->port_id);
742 rte_errno = ENOMEM;
743 return -rte_errno;
744 }
745 intr_handle->type = RTE_INTR_HANDLE_EXT;
746 for (i = 0; i != n; ++i) {
747 /* This rxq obj must not be released in this function. */
748 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
749 int fd;
750 int flags;
751 int rc;
752
753 /* Skip queues that cannot request interrupts. */
754 if (!rxq_obj || !rxq_obj->channel) {
755 /* Use invalid intr_vec[] index to disable entry. */
756 intr_handle->intr_vec[i] =
757 RTE_INTR_VEC_RXTX_OFFSET +
758 RTE_MAX_RXTX_INTR_VEC_ID;
759 continue;
760 }
761 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
762 DRV_LOG(ERR,
763 "port %u too many Rx queues for interrupt"
764 " vector size (%d), Rx interrupts cannot be"
765 " enabled",
766 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
767 mlx5_rx_intr_vec_disable(dev);
768 rte_errno = ENOMEM;
769 return -rte_errno;
770 }
771 fd = rxq_obj->channel->fd;
772 flags = fcntl(fd, F_GETFL);
773 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
774 if (rc < 0) {
775 rte_errno = errno;
776 DRV_LOG(ERR,
777 "port %u failed to make Rx interrupt file"
778 " descriptor %d non-blocking for queue index"
779 " %d",
780 dev->data->port_id, fd, i);
781 mlx5_rx_intr_vec_disable(dev);
782 return -rte_errno;
783 }
784 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
785 intr_handle->efds[count] = fd;
786 count++;
787 }
788 if (!count)
789 mlx5_rx_intr_vec_disable(dev);
790 else
791 intr_handle->nb_efd = count;
792 return 0;
793 }
794
795 /**
796 * Clean up Rx interrupts handler.
797 *
798 * @param dev
799 * Pointer to Ethernet device.
800 */
801 void
802 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
803 {
804 struct mlx5_priv *priv = dev->data->dev_private;
805 struct rte_intr_handle *intr_handle = dev->intr_handle;
806 unsigned int i;
807 unsigned int rxqs_n = priv->rxqs_n;
808 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
809
810 if (!dev->data->dev_conf.intr_conf.rxq)
811 return;
812 if (!intr_handle->intr_vec)
813 goto free;
814 for (i = 0; i != n; ++i) {
815 struct mlx5_rxq_ctrl *rxq_ctrl;
816 struct mlx5_rxq_data *rxq_data;
817
818 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
819 RTE_MAX_RXTX_INTR_VEC_ID)
820 continue;
821 /**
822 		 * Need to access the queue directly to release the reference
823 * kept in mlx5_rx_intr_vec_enable().
824 */
825 rxq_data = (*priv->rxqs)[i];
826 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
827 if (rxq_ctrl->obj)
828 mlx5_rxq_obj_release(rxq_ctrl->obj);
829 }
830 free:
831 rte_intr_free_epoll_fd(intr_handle);
832 if (intr_handle->intr_vec)
833 free(intr_handle->intr_vec);
834 intr_handle->nb_efd = 0;
835 intr_handle->intr_vec = NULL;
836 }
837
838 /**
839  * MLX5 CQ notification.
840 *
841 * @param rxq
842 * Pointer to receive queue structure.
843 * @param sq_n_rxq
844  *   Sequence number per receive queue.
845 */
846 static inline void
847 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
848 {
849 int sq_n = 0;
850 uint32_t doorbell_hi;
851 uint64_t doorbell;
852 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
853
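	/*
	 * Compose the arm doorbell: command sequence number and CQ consumer
	 * index in the high 32 bits, CQ number in the low 32 bits. The high
	 * word goes to the doorbell record, the full 64-bit value is written
	 * to the UAR register.
	 */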
854 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
855 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
856 doorbell = (uint64_t)doorbell_hi << 32;
857 doorbell |= rxq->cqn;
858 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
859 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
860 cq_db_reg, rxq->uar_lock_cq);
861 }
862
863 /**
864 * DPDK callback for Rx queue interrupt enable.
865 *
866 * @param dev
867 * Pointer to Ethernet device structure.
868 * @param rx_queue_id
869 * Rx queue number.
870 *
871 * @return
872 * 0 on success, a negative errno value otherwise and rte_errno is set.
873 */
874 int
875 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
876 {
877 struct mlx5_priv *priv = dev->data->dev_private;
878 struct mlx5_rxq_data *rxq_data;
879 struct mlx5_rxq_ctrl *rxq_ctrl;
880
881 rxq_data = (*priv->rxqs)[rx_queue_id];
882 if (!rxq_data) {
883 rte_errno = EINVAL;
884 return -rte_errno;
885 }
886 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
887 if (rxq_ctrl->irq) {
888 struct mlx5_rxq_obj *rxq_obj;
889
890 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
891 if (!rxq_obj) {
892 rte_errno = EINVAL;
893 return -rte_errno;
894 }
895 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
896 mlx5_rxq_obj_release(rxq_obj);
897 }
898 return 0;
899 }
900
901 /**
902 * DPDK callback for Rx queue interrupt disable.
903 *
904 * @param dev
905 * Pointer to Ethernet device structure.
906 * @param rx_queue_id
907 * Rx queue number.
908 *
909 * @return
910 * 0 on success, a negative errno value otherwise and rte_errno is set.
911 */
912 int
913 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
914 {
915 struct mlx5_priv *priv = dev->data->dev_private;
916 struct mlx5_rxq_data *rxq_data;
917 struct mlx5_rxq_ctrl *rxq_ctrl;
918 struct mlx5_rxq_obj *rxq_obj = NULL;
919 struct ibv_cq *ev_cq;
920 void *ev_ctx;
921 int ret;
922
923 rxq_data = (*priv->rxqs)[rx_queue_id];
924 if (!rxq_data) {
925 rte_errno = EINVAL;
926 return -rte_errno;
927 }
928 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
929 if (!rxq_ctrl->irq)
930 return 0;
931 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
932 if (!rxq_obj) {
933 rte_errno = EINVAL;
934 return -rte_errno;
935 }
936 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
937 if (ret || ev_cq != rxq_obj->cq) {
938 rte_errno = EINVAL;
939 goto exit;
940 }
941 rxq_data->cq_arm_sn++;
942 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
943 mlx5_rxq_obj_release(rxq_obj);
944 return 0;
945 exit:
946 ret = rte_errno; /* Save rte_errno before cleanup. */
947 if (rxq_obj)
948 mlx5_rxq_obj_release(rxq_obj);
949 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
950 dev->data->port_id, rx_queue_id);
951 rte_errno = ret; /* Restore rte_errno. */
952 return -rte_errno;
953 }
954
955 /**
956 * Create a CQ Verbs object.
957 *
958 * @param dev
959 * Pointer to Ethernet device.
960 * @param priv
961 * Pointer to device private data.
962 * @param rxq_data
963 * Pointer to Rx queue data.
964 * @param cqe_n
965 * Number of CQEs in CQ.
966 * @param rxq_obj
967 * Pointer to Rx queue object data.
968 *
969 * @return
970 * The Verbs object initialised, NULL otherwise and rte_errno is set.
971 */
972 static struct ibv_cq *
973 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
974 struct mlx5_rxq_data *rxq_data,
975 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
976 {
977 struct {
978 struct ibv_cq_init_attr_ex ibv;
979 struct mlx5dv_cq_init_attr mlx5;
980 } cq_attr;
981
982 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
983 .cqe = cqe_n,
984 .channel = rxq_obj->channel,
985 .comp_mask = 0,
986 };
987 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
988 .comp_mask = 0,
989 };
990 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
991 !rxq_data->lro) {
992 cq_attr.mlx5.comp_mask |=
993 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
994 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
995 cq_attr.mlx5.cqe_comp_res_format =
996 mlx5_rxq_mprq_enabled(rxq_data) ?
997 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
998 MLX5DV_CQE_RES_FORMAT_HASH;
999 #else
1000 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1001 #endif
1002 /*
1003 * For vectorized Rx, it must not be doubled in order to
1004 * make cq_ci and rq_ci aligned.
1005 */
1006 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1007 cq_attr.ibv.cqe *= 2;
1008 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1009 DRV_LOG(DEBUG,
1010 "port %u Rx CQE compression is disabled for HW"
1011 " timestamp",
1012 dev->data->port_id);
1013 } else if (priv->config.cqe_comp && rxq_data->lro) {
1014 DRV_LOG(DEBUG,
1015 "port %u Rx CQE compression is disabled for LRO",
1016 dev->data->port_id);
1017 }
1018 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1019 if (priv->config.cqe_pad) {
1020 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1021 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1022 }
1023 #endif
1024 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1025 &cq_attr.ibv,
1026 &cq_attr.mlx5));
1027 }
1028
1029 /**
1030 * Create a WQ Verbs object.
1031 *
1032 * @param dev
1033 * Pointer to Ethernet device.
1034 * @param priv
1035 * Pointer to device private data.
1036 * @param rxq_data
1037 * Pointer to Rx queue data.
1038 * @param idx
1039 * Queue index in DPDK Rx queue array
1040 * @param wqe_n
1041 * Number of WQEs in WQ.
1042 * @param rxq_obj
1043 * Pointer to Rx queue object data.
1044 *
1045 * @return
1046 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1047 */
1048 static struct ibv_wq *
1049 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1050 struct mlx5_rxq_data *rxq_data, uint16_t idx,
1051 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1052 {
1053 struct {
1054 struct ibv_wq_init_attr ibv;
1055 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1056 struct mlx5dv_wq_init_attr mlx5;
1057 #endif
1058 } wq_attr;
1059
1060 wq_attr.ibv = (struct ibv_wq_init_attr){
1061 .wq_context = NULL, /* Could be useful in the future. */
1062 .wq_type = IBV_WQT_RQ,
1063 /* Max number of outstanding WRs. */
1064 .max_wr = wqe_n >> rxq_data->sges_n,
1065 /* Max number of scatter/gather elements in a WR. */
1066 .max_sge = 1 << rxq_data->sges_n,
1067 .pd = priv->sh->pd,
1068 .cq = rxq_obj->cq,
1069 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1070 .create_flags = (rxq_data->vlan_strip ?
1071 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1072 };
1073 /* By default, FCS (CRC) is stripped by hardware. */
1074 if (rxq_data->crc_present) {
1075 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1076 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1077 }
1078 if (priv->config.hw_padding) {
1079 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1080 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1081 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1082 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1083 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1084 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1085 #endif
1086 }
1087 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1088 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1089 .comp_mask = 0,
1090 };
1091 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1092 struct mlx5dv_striding_rq_init_attr *mprq_attr =
1093 &wq_attr.mlx5.striding_rq_attrs;
1094
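		/* Stride count and stride size are passed as log2 values taken from the Rx queue data. */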
1095 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1096 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1097 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1098 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1099 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1100 };
1101 }
1102 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1103 &wq_attr.mlx5);
1104 #else
1105 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1106 #endif
1107 if (rxq_obj->wq) {
1108 /*
1109 * Make sure number of WRs*SGEs match expectations since a queue
1110 * cannot allocate more than "desc" buffers.
1111 */
1112 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1113 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1114 DRV_LOG(ERR,
1115 "port %u Rx queue %u requested %u*%u but got"
1116 " %u*%u WRs*SGEs",
1117 dev->data->port_id, idx,
1118 wqe_n >> rxq_data->sges_n,
1119 (1 << rxq_data->sges_n),
1120 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1121 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1122 rxq_obj->wq = NULL;
1123 rte_errno = EINVAL;
1124 }
1125 }
1126 return rxq_obj->wq;
1127 }
1128
1129 /**
1130 * Fill common fields of create RQ attributes structure.
1131 *
1132 * @param rxq_data
1133 * Pointer to Rx queue data.
1134 * @param cqn
1135 * CQ number to use with this RQ.
1136 * @param rq_attr
1137  *   RQ attributes structure to fill.
1138 */
1139 static void
1140 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1141 struct mlx5_devx_create_rq_attr *rq_attr)
1142 {
1143 rq_attr->state = MLX5_RQC_STATE_RST;
1144 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1145 rq_attr->cqn = cqn;
1146 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1147 }
1148
1149 /**
1150 * Fill common fields of DevX WQ attributes structure.
1151 *
1152 * @param priv
1153 * Pointer to device private data.
1154 * @param rxq_ctrl
1155 * Pointer to Rx queue control structure.
1156 * @param wq_attr
1157  *   WQ attributes structure to fill.
1158 */
1159 static void
1160 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1161 struct mlx5_devx_wq_attr *wq_attr)
1162 {
1163 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1164 MLX5_WQ_END_PAD_MODE_ALIGN :
1165 MLX5_WQ_END_PAD_MODE_NONE;
1166 wq_attr->pd = priv->sh->pdn;
1167 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1168 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1169 wq_attr->dbr_umem_valid = 1;
1170 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1171 wq_attr->wq_umem_valid = 1;
1172 }
1173
1174 /**
1175 * Create a RQ object using DevX.
1176 *
1177 * @param dev
1178 * Pointer to Ethernet device.
1179 * @param idx
1180 * Queue index in DPDK Rx queue array
1181 * @param cqn
1182 * CQ number to use with this RQ.
1183 *
1184 * @return
1185 * The DevX object initialised, NULL otherwise and rte_errno is set.
1186 */
1187 static struct mlx5_devx_obj *
1188 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1189 {
1190 struct mlx5_priv *priv = dev->data->dev_private;
1191 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1192 struct mlx5_rxq_ctrl *rxq_ctrl =
1193 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1194 struct mlx5_devx_create_rq_attr rq_attr;
1195 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1196 uint32_t wq_size = 0;
1197 uint32_t wqe_size = 0;
1198 uint32_t log_wqe_size = 0;
1199 void *buf = NULL;
1200 struct mlx5_devx_obj *rq;
1201
1202 memset(&rq_attr, 0, sizeof(rq_attr));
1203 /* Fill RQ attributes. */
1204 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1205 rq_attr.flush_in_error_en = 1;
1206 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1207 /* Fill WQ attributes for this RQ. */
1208 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1209 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1210 /*
1211 * Number of strides in each WQE:
1212 * 512*2^single_wqe_log_num_of_strides.
1213 */
1214 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1215 rxq_data->strd_num_n -
1216 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1217 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1218 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1219 rxq_data->strd_sz_n -
1220 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1221 wqe_size = sizeof(struct mlx5_wqe_mprq);
1222 } else {
1223 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1224 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1225 }
1226 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1227 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1228 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1229 /* Calculate and allocate WQ memory space. */
1230 	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1231 wq_size = wqe_n * wqe_size;
1232 buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
1233 rxq_ctrl->socket);
1234 if (!buf)
1235 return NULL;
1236 rxq_data->wqes = buf;
1237 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1238 buf, wq_size, 0);
1239 if (!rxq_ctrl->wq_umem) {
1240 rte_free(buf);
1241 return NULL;
1242 }
1243 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1244 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1245 if (!rq)
1246 rxq_release_rq_resources(rxq_ctrl);
1247 return rq;
1248 }
1249
1250 /**
1251 * Create the Rx hairpin queue object.
1252 *
1253 * @param dev
1254 * Pointer to Ethernet device.
1255 * @param idx
1256 * Queue index in DPDK Rx queue array
1257 *
1258 * @return
1259 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1260 */
1261 static struct mlx5_rxq_obj *
1262 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1263 {
1264 struct mlx5_priv *priv = dev->data->dev_private;
1265 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1266 struct mlx5_rxq_ctrl *rxq_ctrl =
1267 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1268 struct mlx5_devx_create_rq_attr attr = { 0 };
1269 struct mlx5_rxq_obj *tmpl = NULL;
1270 int ret = 0;
1271 uint32_t max_wq_data;
1272
1273 MLX5_ASSERT(rxq_data);
1274 MLX5_ASSERT(!rxq_ctrl->obj);
1275 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1276 rxq_ctrl->socket);
1277 if (!tmpl) {
1278 DRV_LOG(ERR,
1279 "port %u Rx queue %u cannot allocate verbs resources",
1280 dev->data->port_id, rxq_data->idx);
1281 rte_errno = ENOMEM;
1282 goto error;
1283 }
1284 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1285 tmpl->rxq_ctrl = rxq_ctrl;
1286 attr.hairpin = 1;
1287 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1288 	/* Jumbo frames > 9 KB should be supported, as well as more packets. */
1289 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
1290 if (priv->config.log_hp_size > max_wq_data) {
1291 DRV_LOG(ERR, "total data size %u power of 2 is "
1292 "too large for hairpin",
1293 priv->config.log_hp_size);
1294 rte_errno = ERANGE;
1295 return NULL;
1296 }
1297 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
1298 } else {
1299 attr.wq_attr.log_hairpin_data_sz =
1300 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
1301 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
1302 }
1303 	/* Set the number of packets to the maximum value for performance. */
1304 attr.wq_attr.log_hairpin_num_packets =
1305 attr.wq_attr.log_hairpin_data_sz -
1306 MLX5_HAIRPIN_QUEUE_STRIDE;
1307 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1308 rxq_ctrl->socket);
1309 if (!tmpl->rq) {
1310 DRV_LOG(ERR,
1311 "port %u Rx hairpin queue %u can't create rq object",
1312 dev->data->port_id, idx);
1313 rte_errno = errno;
1314 goto error;
1315 }
1316 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1317 idx, (void *)&tmpl);
1318 rte_atomic32_inc(&tmpl->refcnt);
1319 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1320 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1321 return tmpl;
1322 error:
1323 ret = rte_errno; /* Save rte_errno before cleanup. */
1324 if (tmpl->rq)
1325 mlx5_devx_cmd_destroy(tmpl->rq);
1326 rte_errno = ret; /* Restore rte_errno. */
1327 return NULL;
1328 }
1329
1330 /**
1331 * Create the Rx queue Verbs/DevX object.
1332 *
1333 * @param dev
1334 * Pointer to Ethernet device.
1335 * @param idx
1336 * Queue index in DPDK Rx queue array
1337 * @param type
1338 * Type of Rx queue object to create.
1339 *
1340 * @return
1341 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1342 */
1343 struct mlx5_rxq_obj *
1344 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1345 enum mlx5_rxq_obj_type type)
1346 {
1347 struct mlx5_priv *priv = dev->data->dev_private;
1348 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1349 struct mlx5_rxq_ctrl *rxq_ctrl =
1350 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1351 struct ibv_wq_attr mod;
1352 unsigned int cqe_n;
1353 unsigned int wqe_n = 1 << rxq_data->elts_n;
1354 struct mlx5_rxq_obj *tmpl = NULL;
1355 struct mlx5dv_cq cq_info;
1356 struct mlx5dv_rwq rwq;
1357 int ret = 0;
1358 struct mlx5dv_obj obj;
1359
1360 MLX5_ASSERT(rxq_data);
1361 MLX5_ASSERT(!rxq_ctrl->obj);
1362 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1363 return mlx5_rxq_obj_hairpin_new(dev, idx);
1364 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1365 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1366 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1367 rxq_ctrl->socket);
1368 if (!tmpl) {
1369 DRV_LOG(ERR,
1370 "port %u Rx queue %u cannot allocate verbs resources",
1371 dev->data->port_id, rxq_data->idx);
1372 rte_errno = ENOMEM;
1373 goto error;
1374 }
1375 tmpl->type = type;
1376 tmpl->rxq_ctrl = rxq_ctrl;
1377 if (rxq_ctrl->irq) {
1378 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1379 if (!tmpl->channel) {
1380 DRV_LOG(ERR, "port %u: comp channel creation failure",
1381 dev->data->port_id);
1382 rte_errno = ENOMEM;
1383 goto error;
1384 }
1385 }
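	/*
	 * Size the CQ for one CQE per packet: with MPRQ each WQE can complete
	 * up to (1 << strd_num_n) packets, otherwise one packet per WQE.
	 */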
1386 if (mlx5_rxq_mprq_enabled(rxq_data))
1387 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1388 else
1389 cqe_n = wqe_n - 1;
1390 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1391 if (!tmpl->cq) {
1392 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1393 dev->data->port_id, idx);
1394 rte_errno = ENOMEM;
1395 goto error;
1396 }
1397 obj.cq.in = tmpl->cq;
1398 obj.cq.out = &cq_info;
1399 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1400 if (ret) {
1401 rte_errno = ret;
1402 goto error;
1403 }
1404 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1405 DRV_LOG(ERR,
1406 "port %u wrong MLX5_CQE_SIZE environment variable"
1407 " value: it should be set to %u",
1408 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1409 rte_errno = EINVAL;
1410 goto error;
1411 }
1412 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1413 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1414 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1415 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1416 /* Allocate door-bell for types created with DevX. */
1417 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1418 struct mlx5_devx_dbr_page *dbr_page;
1419 int64_t dbr_offset;
1420
1421 dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1422 if (dbr_offset < 0)
1423 goto error;
1424 rxq_ctrl->dbr_offset = dbr_offset;
1425 rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1426 rxq_ctrl->dbr_umem_id_valid = 1;
1427 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1428 (uintptr_t)rxq_ctrl->dbr_offset);
1429 }
1430 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1431 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1432 tmpl);
1433 if (!tmpl->wq) {
1434 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1435 dev->data->port_id, idx);
1436 rte_errno = ENOMEM;
1437 goto error;
1438 }
1439 /* Change queue state to ready. */
1440 mod = (struct ibv_wq_attr){
1441 .attr_mask = IBV_WQ_ATTR_STATE,
1442 .wq_state = IBV_WQS_RDY,
1443 };
1444 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1445 if (ret) {
1446 DRV_LOG(ERR,
1447 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1448 " failed", dev->data->port_id, idx);
1449 rte_errno = ret;
1450 goto error;
1451 }
1452 obj.rwq.in = tmpl->wq;
1453 obj.rwq.out = &rwq;
1454 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1455 if (ret) {
1456 rte_errno = ret;
1457 goto error;
1458 }
1459 rxq_data->wqes = rwq.buf;
1460 rxq_data->rq_db = rwq.dbrec;
1461 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1462 struct mlx5_devx_modify_rq_attr rq_attr;
1463
1464 memset(&rq_attr, 0, sizeof(rq_attr));
1465 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1466 if (!tmpl->rq) {
1467 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1468 dev->data->port_id, idx);
1469 rte_errno = ENOMEM;
1470 goto error;
1471 }
1472 /* Change queue state to ready. */
1473 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1474 rq_attr.state = MLX5_RQC_STATE_RDY;
1475 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1476 if (ret)
1477 goto error;
1478 }
1479 /* Fill the rings. */
1480 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1481 rxq_data->cq_db = cq_info.dbrec;
1482 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1483 rxq_data->cq_uar = cq_info.cq_uar;
1484 rxq_data->cqn = cq_info.cqn;
1485 rxq_data->cq_arm_sn = 0;
1486 mlx5_rxq_initialize(rxq_data);
1487 rxq_data->cq_ci = 0;
1488 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1489 idx, (void *)&tmpl);
1490 rte_atomic32_inc(&tmpl->refcnt);
1491 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1492 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1493 return tmpl;
1494 error:
1495 if (tmpl) {
1496 ret = rte_errno; /* Save rte_errno before cleanup. */
1497 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1498 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1499 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1500 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1501 if (tmpl->cq)
1502 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1503 if (tmpl->channel)
1504 claim_zero(mlx5_glue->destroy_comp_channel
1505 (tmpl->channel));
1506 rte_free(tmpl);
1507 rte_errno = ret; /* Restore rte_errno. */
1508 }
1509 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1510 rxq_release_rq_resources(rxq_ctrl);
1511 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1512 return NULL;
1513 }
1514
1515 /**
1516  * Verify that the Rx queue objects list is empty.
1517 *
1518 * @param dev
1519 * Pointer to Ethernet device.
1520 *
1521 * @return
1522 * The number of objects not released.
1523 */
1524 int
1525 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1526 {
1527 struct mlx5_priv *priv = dev->data->dev_private;
1528 int ret = 0;
1529 struct mlx5_rxq_obj *rxq_obj;
1530
1531 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1532 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1533 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1534 ++ret;
1535 }
1536 return ret;
1537 }
1538
1539 /**
1540 * Callback function to initialize mbufs for Multi-Packet RQ.
1541 */
1542 static inline void
1543 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1544 void *_m, unsigned int i __rte_unused)
1545 {
1546 struct mlx5_mprq_buf *buf = _m;
1547 struct rte_mbuf_ext_shared_info *shinfo;
1548 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1549 unsigned int j;
1550
1551 memset(_m, 0, sizeof(*buf));
1552 buf->mp = mp;
1553 rte_atomic16_set(&buf->refcnt, 1);
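	/*
	 * One shared-info descriptor per stride, so each stride can be
	 * attached to an mbuf as an external buffer with its own free callback.
	 */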
1554 for (j = 0; j != strd_n; ++j) {
1555 shinfo = &buf->shinfos[j];
1556 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1557 shinfo->fcb_opaque = buf;
1558 }
1559 }
1560
1561 /**
1562 * Free mempool of Multi-Packet RQ.
1563 *
1564 * @param dev
1565 * Pointer to Ethernet device.
1566 *
1567 * @return
1568 * 0 on success, negative errno value on failure.
1569 */
1570 int
1571 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1572 {
1573 struct mlx5_priv *priv = dev->data->dev_private;
1574 struct rte_mempool *mp = priv->mprq_mp;
1575 unsigned int i;
1576
1577 if (mp == NULL)
1578 return 0;
1579 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1580 dev->data->port_id, mp->name);
1581 /*
1582 * If a buffer in the pool has been externally attached to a mbuf and it
1583 	 * is still in use by the application, destroying the Rx queue can spoil
1584 	 * the packet. It is unlikely to happen, but if the application dynamically
1585 	 * creates and destroys queues while holding Rx packets, this can happen.
1586 *
1587 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1588 * RQ isn't provided by application but managed by PMD.
1589 */
1590 if (!rte_mempool_full(mp)) {
1591 DRV_LOG(ERR,
1592 "port %u mempool for Multi-Packet RQ is still in use",
1593 dev->data->port_id);
1594 rte_errno = EBUSY;
1595 return -rte_errno;
1596 }
1597 rte_mempool_free(mp);
1598 /* Unset mempool for each Rx queue. */
1599 for (i = 0; i != priv->rxqs_n; ++i) {
1600 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1601
1602 if (rxq == NULL)
1603 continue;
1604 rxq->mprq_mp = NULL;
1605 }
1606 priv->mprq_mp = NULL;
1607 return 0;
1608 }
1609
1610 /**
1611 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1612  * mempool. If already allocated, reuse it if there are enough elements.
1613 * Otherwise, resize it.
1614 *
1615 * @param dev
1616 * Pointer to Ethernet device.
1617 *
1618 * @return
1619 * 0 on success, negative errno value on failure.
1620 */
1621 int
1622 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1623 {
1624 struct mlx5_priv *priv = dev->data->dev_private;
1625 struct rte_mempool *mp = priv->mprq_mp;
1626 char name[RTE_MEMPOOL_NAMESIZE];
1627 unsigned int desc = 0;
1628 unsigned int buf_len;
1629 unsigned int obj_num;
1630 unsigned int obj_size;
1631 unsigned int strd_num_n = 0;
1632 unsigned int strd_sz_n = 0;
1633 unsigned int i;
1634 unsigned int n_ibv = 0;
1635
1636 if (!mlx5_mprq_enabled(dev))
1637 return 0;
1638 /* Count the total number of descriptors configured. */
1639 for (i = 0; i != priv->rxqs_n; ++i) {
1640 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1641 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1642 (rxq, struct mlx5_rxq_ctrl, rxq);
1643
1644 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1645 continue;
1646 n_ibv++;
1647 desc += 1 << rxq->elts_n;
1648 /* Get the max number of strides. */
1649 if (strd_num_n < rxq->strd_num_n)
1650 strd_num_n = rxq->strd_num_n;
1651 /* Get the max size of a stride. */
1652 if (strd_sz_n < rxq->strd_sz_n)
1653 strd_sz_n = rxq->strd_sz_n;
1654 }
1655 MLX5_ASSERT(strd_num_n && strd_sz_n);
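	/*
	 * Each mempool object holds the buffer control structure, the data
	 * area for all strides, one shared-info per stride and mbuf headroom.
	 */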
1656 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1657 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1658 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1659 /*
1660 * Received packets can be either memcpy'd or externally referenced. In
1661 	 * case the packet is attached to an mbuf as an external buffer, it
1662 	 * isn't possible to predict how the buffers will be queued by the
1663 	 * application, so there is no way to pre-allocate exactly the needed
1664 	 * buffers in advance; enough buffers must be prepared speculatively.
1665 *
1666 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1667 * received packets to buffers provided by application (rxq->mp) until
1668 * this Mempool gets available again.
1669 */
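	/* Speculatively allocate four times the configured number of descriptors. */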
1670 desc *= 4;
1671 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1672 /*
1673 	 * rte_mempool_create_empty() has a sanity check that refuses a cache
1674 	 * size that is too large compared to the number of elements.
1675 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1676 * constant number 2 instead.
1677 */
1678 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1679 	/* Check whether a mempool is already allocated and whether it can be reused. */
1680 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1681 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1682 dev->data->port_id, mp->name);
1683 /* Reuse. */
1684 goto exit;
1685 } else if (mp != NULL) {
1686 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1687 dev->data->port_id, mp->name);
1688 /*
1689 		 * If freeing fails, the mempool may still be in use; there is no
1690 		 * choice but to keep using the existing one. On buffer underrun,
1691 		 * packets will be memcpy'd instead of using external buffer
1692 		 * attachment.
1693 */
1694 if (mlx5_mprq_free_mp(dev)) {
1695 if (mp->elt_size >= obj_size)
1696 goto exit;
1697 else
1698 return -rte_errno;
1699 }
1700 }
1701 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1702 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1703 0, NULL, NULL, mlx5_mprq_buf_init,
1704 (void *)(uintptr_t)(1 << strd_num_n),
1705 dev->device->numa_node, 0);
1706 if (mp == NULL) {
1707 DRV_LOG(ERR,
1708 "port %u failed to allocate a mempool for"
1709 " Multi-Packet RQ, count=%u, size=%u",
1710 dev->data->port_id, obj_num, obj_size);
1711 rte_errno = ENOMEM;
1712 return -rte_errno;
1713 }
1714 priv->mprq_mp = mp;
1715 exit:
1716 /* Set mempool for each Rx queue. */
1717 for (i = 0; i != priv->rxqs_n; ++i) {
1718 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1719 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1720 (rxq, struct mlx5_rxq_ctrl, rxq);
1721
1722 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1723 continue;
1724 rxq->mprq_mp = mp;
1725 }
1726 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1727 dev->data->port_id);
1728 return 0;
1729 }
1730
1731 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1732 sizeof(struct rte_vlan_hdr) * 2 + \
1733 sizeof(struct rte_ipv6_hdr)))
1734 #define MAX_TCP_OPTION_SIZE 40u
1735 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1736 sizeof(struct rte_tcp_hdr) + \
1737 MAX_TCP_OPTION_SIZE))
1738
1739 /**
1740  * Adjust the maximum LRO message size.
1741 *
1742 * @param dev
1743 * Pointer to Ethernet device.
1744 * @param idx
1745 * RX queue index.
1746 * @param max_lro_size
1747 * The maximum size for LRO packet.
1748 */
1749 static void
1750 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1751 uint32_t max_lro_size)
1752 {
1753 struct mlx5_priv *priv = dev->data->dev_private;
1754
1755 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1756 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1757 MLX5_MAX_TCP_HDR_OFFSET)
1758 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1759 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1760 MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
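	/* max_lro_msg_size is kept in units of MLX5_LRO_SEG_CHUNK_SIZE. */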
1761 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1762 if (priv->max_lro_msg_size)
1763 priv->max_lro_msg_size =
1764 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1765 else
1766 priv->max_lro_msg_size = max_lro_size;
1767 DRV_LOG(DEBUG,
1768 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1769 dev->data->port_id, idx,
1770 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1771 }
1772
1773 /**
1774 * Create a DPDK Rx queue.
1775 *
1776 * @param dev
1777 * Pointer to Ethernet device.
1778 * @param idx
1779 * RX queue index.
1780 * @param desc
1781 * Number of descriptors to configure in queue.
1782 * @param socket
1783 * NUMA socket on which memory must be allocated.
1784 *
1785 * @return
1786 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1787 */
1788 struct mlx5_rxq_ctrl *
1789 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1790 unsigned int socket, const struct rte_eth_rxconf *conf,
1791 struct rte_mempool *mp)
1792 {
1793 struct mlx5_priv *priv = dev->data->dev_private;
1794 struct mlx5_rxq_ctrl *tmpl;
1795 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1796 unsigned int mprq_stride_nums;
1797 unsigned int mprq_stride_size;
1798 unsigned int mprq_stride_cap;
1799 struct mlx5_dev_config *config = &priv->config;
1800 /*
1801 * Always allocate extra slots, even if eventually
1802 * the vector Rx will not be used.
1803 */
1804 uint16_t desc_n =
1805 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1806 uint64_t offloads = conf->offloads |
1807 dev->data->dev_conf.rxmode.offloads;
1808 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1809 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1810 unsigned int max_rx_pkt_len = lro_on_queue ?
1811 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1812 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1813 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1814 RTE_PKTMBUF_HEADROOM;
1815 unsigned int max_lro_size = 0;
1816 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1817
1818 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1819 DEV_RX_OFFLOAD_SCATTER)) {
1820 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1821 " configured and no enough mbuf space(%u) to contain "
1822 "the maximum RX packet length(%u) with head-room(%u)",
1823 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1824 RTE_PKTMBUF_HEADROOM);
1825 rte_errno = ENOSPC;
1826 return NULL;
1827 }
1828 tmpl = rte_calloc_socket("RXQ", 1,
1829 sizeof(*tmpl) +
1830 desc_n * sizeof(struct rte_mbuf *),
1831 0, socket);
1832 if (!tmpl) {
1833 rte_errno = ENOMEM;
1834 return NULL;
1835 }
1836 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1837 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1838 MLX5_MR_BTREE_CACHE_N, socket)) {
1839 /* rte_errno is already set. */
1840 goto error;
1841 }
1842 tmpl->socket = socket;
1843 if (dev->data->dev_conf.intr_conf.rxq)
1844 tmpl->irq = 1;
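	/*
	 * Compute default MPRQ parameters: strides per WQE, stride size and the
	 * resulting per-WQE byte capacity; explicitly configured values take
	 * precedence over the defaults.
	 */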
1845 mprq_stride_nums = config->mprq.stride_num_n ?
1846 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1847 mprq_stride_size = non_scatter_min_mbuf_size <=
1848 (1U << config->mprq.max_stride_size_n) ?
1849 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1850 mprq_stride_cap = (config->mprq.stride_num_n ?
1851 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1852 (config->mprq.stride_size_n ?
1853 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1854 /*
1855 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1856 * following conditions are met:
1857 * - MPRQ is enabled.
1858 * - The number of descs is more than the number of strides.
1859 * - max_rx_pkt_len plus overhead is less than the max size
1860 * of a stride or mprq_stride_size is specified by a user.
1861 * Need to make sure that there are enough strides to encapsulate
1862 * the maximum packet size in case mprq_stride_size is set.
1863 * Otherwise, enable Rx scatter if necessary.
1864 */
1865 if (mprq_en && desc > (1U << mprq_stride_nums) &&
1866 (non_scatter_min_mbuf_size <=
1867 (1U << config->mprq.max_stride_size_n) ||
1868 (config->mprq.stride_size_n &&
1869 non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1870 /* TODO: Rx scatter isn't supported yet. */
1871 tmpl->rxq.sges_n = 0;
1872 /* Trim the number of descs needed. */
1873 desc >>= mprq_stride_nums;
1874 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1875 config->mprq.stride_num_n : mprq_stride_nums;
1876 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1877 config->mprq.stride_size_n : mprq_stride_size;
1878 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1879 tmpl->rxq.strd_scatter_en =
1880 !!(offloads & DEV_RX_OFFLOAD_SCATTER);
1881 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1882 config->mprq.max_memcpy_len);
1883 max_lro_size = RTE_MIN(max_rx_pkt_len,
1884 (1u << tmpl->rxq.strd_num_n) *
1885 (1u << tmpl->rxq.strd_sz_n));
1886 DRV_LOG(DEBUG,
1887 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1888 " strd_num_n = %u, strd_sz_n = %u",
1889 dev->data->port_id, idx,
1890 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1891 } else if (max_rx_pkt_len <= first_mb_free_size) {
1892 tmpl->rxq.sges_n = 0;
1893 max_lro_size = max_rx_pkt_len;
1894 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1895 unsigned int size = non_scatter_min_mbuf_size;
1896 unsigned int sges_n;
1897
1898 if (lro_on_queue && first_mb_free_size <
1899 MLX5_MAX_LRO_HEADER_FIX) {
1900 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1901 " to include the max header size(%u) for LRO",
1902 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1903 rte_errno = ENOTSUP;
1904 goto error;
1905 }
1906 /*
1907 * Determine the number of SGEs needed for a full packet
1908 * and round it to the next power of two.
1909 */
1910 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1911 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1912 DRV_LOG(ERR,
1913 "port %u too many SGEs (%u) needed to handle"
1914 " requested maximum packet size %u, the maximum"
1915 " supported are %u", dev->data->port_id,
1916 1 << sges_n, max_rx_pkt_len,
1917 1u << MLX5_MAX_LOG_RQ_SEGS);
1918 rte_errno = ENOTSUP;
1919 goto error;
1920 }
1921 tmpl->rxq.sges_n = sges_n;
1922 max_lro_size = max_rx_pkt_len;
1923 }
1924 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1925 DRV_LOG(WARNING,
1926 "port %u MPRQ is requested but cannot be enabled\n"
1927 " (requested: pkt_sz = %u, desc_num = %u,"
1928 " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1929 " supported: min_rxqs_num = %u,"
1930 " min_stride_sz = %u, max_stride_sz = %u).",
1931 dev->data->port_id, non_scatter_min_mbuf_size,
1932 desc, priv->rxqs_n,
1933 config->mprq.stride_size_n ?
1934 (1U << config->mprq.stride_size_n) :
1935 (1U << mprq_stride_size),
1936 config->mprq.stride_num_n ?
1937 (1U << config->mprq.stride_num_n) :
1938 (1U << mprq_stride_nums),
1939 config->mprq.min_rxqs_num,
1940 (1U << config->mprq.min_stride_size_n),
1941 (1U << config->mprq.max_stride_size_n));
1942 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1943 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1944 if (desc % (1 << tmpl->rxq.sges_n)) {
1945 DRV_LOG(ERR,
1946 "port %u number of Rx queue descriptors (%u) is not a"
1947 " multiple of SGEs per packet (%u)",
1948 dev->data->port_id,
1949 desc,
1950 1 << tmpl->rxq.sges_n);
1951 rte_errno = EINVAL;
1952 goto error;
1953 }
1954 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1955 /* Toggle RX checksum offload if hardware supports it. */
1956 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1957 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1958 /* Configure VLAN stripping. */
1959 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1960 /* By default, FCS (CRC) is stripped by hardware. */
1961 tmpl->rxq.crc_present = 0;
1962 tmpl->rxq.lro = lro_on_queue;
1963 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1964 if (config->hw_fcs_strip) {
1965 /*
1966 * RQs used for LRO-enabled TIRs should not be
1967 * configured to scatter the FCS.
1968 */
1969 if (lro_on_queue)
1970 DRV_LOG(WARNING,
1971 "port %u CRC stripping has been "
1972 "disabled but will still be performed "
1973 "by hardware, because LRO is enabled",
1974 dev->data->port_id);
1975 else
1976 tmpl->rxq.crc_present = 1;
1977 } else {
1978 DRV_LOG(WARNING,
1979 "port %u CRC stripping has been disabled but will"
1980 " still be performed by hardware, make sure MLNX_OFED"
1981 " and firmware are up to date",
1982 dev->data->port_id);
1983 }
1984 }
1985 DRV_LOG(DEBUG,
1986 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1987 " incoming frames to hide it",
1988 dev->data->port_id,
1989 tmpl->rxq.crc_present ? "disabled" : "enabled",
1990 tmpl->rxq.crc_present << 2);
1991 /* Save port ID. */
1992 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1993 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1994 tmpl->rxq.port_id = dev->data->port_id;
1995 tmpl->priv = priv;
1996 tmpl->rxq.mp = mp;
1997 tmpl->rxq.elts_n = log2above(desc);
1998 tmpl->rxq.rq_repl_thresh =
1999 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
2000 tmpl->rxq.elts =
2001 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
2002 #ifndef RTE_ARCH_64
2003 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
2004 #endif
2005 tmpl->rxq.idx = idx;
2006 rte_atomic32_inc(&tmpl->refcnt);
2007 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2008 return tmpl;
2009 error:
2010 rte_free(tmpl);
2011 return NULL;
2012 }
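
/*
 * Illustrative sketch (not part of the driver): the stride arithmetic used
 * above to decide whether a queue can run as a Multi-Packet RQ.  The stride
 * exponents below are made-up example values, not the driver defaults.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned int stride_num_n = 6;      /* hypothetical: 2^6 = 64 strides */
	unsigned int stride_size_n = 11;    /* hypothetical: 2^11 = 2048 bytes */
	unsigned int pkt_size = 1500 + 128; /* example packet plus headroom */
	unsigned int stride_cap = (1U << stride_num_n) * (1U << stride_size_n);

	printf("stride capacity = %u bytes\n", stride_cap);
	printf("packet of %u bytes %s in a single stride\n", pkt_size,
	       pkt_size <= (1U << stride_size_n) ? "fits" : "does not fit");
	return 0;
}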
2013
2014 /**
2015 * Create a DPDK Rx hairpin queue.
2016 *
2017 * @param dev
2018 * Pointer to Ethernet device.
2019 * @param idx
2020 * RX queue index.
2021 * @param desc
2022 * Number of descriptors to configure in queue.
2023 * @param hairpin_conf
2024 * The hairpin binding configuration.
2025 *
2026 * @return
2027 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2028 */
2029 struct mlx5_rxq_ctrl *
2030 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2031 const struct rte_eth_hairpin_conf *hairpin_conf)
2032 {
2033 struct mlx5_priv *priv = dev->data->dev_private;
2034 struct mlx5_rxq_ctrl *tmpl;
2035
2036 tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
2037 if (!tmpl) {
2038 rte_errno = ENOMEM;
2039 return NULL;
2040 }
2041 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2042 tmpl->socket = SOCKET_ID_ANY;
2043 tmpl->rxq.rss_hash = 0;
2044 tmpl->rxq.port_id = dev->data->port_id;
2045 tmpl->priv = priv;
2046 tmpl->rxq.mp = NULL;
2047 tmpl->rxq.elts_n = log2above(desc);
2048 tmpl->rxq.elts = NULL;
2049 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2050 tmpl->hairpin_conf = *hairpin_conf;
2051 tmpl->rxq.idx = idx;
2052 rte_atomic32_inc(&tmpl->refcnt);
2053 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2054 return tmpl;
2055 }
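
/*
 * Illustrative sketch (not part of the driver): the application-level call
 * that reaches mlx5_rxq_hairpin_new() through the PMD rx_hairpin_queue_setup
 * callback.  Port and queue numbers are made up and the port is assumed to
 * be already configured; field names follow the rte_ethdev hairpin API of
 * this DPDK snapshot.
 */
#include <rte_ethdev.h>

static int
hairpin_rxq_setup_example(uint16_t port_id)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };

	conf.peers[0].port = port_id; /* Peer Tx queue on the same port. */
	conf.peers[0].queue = 1;      /* Hypothetical peer Tx queue index. */
	/* 256 descriptors for hairpin Rx queue 1. */
	return rte_eth_rx_hairpin_queue_setup(port_id, 1, 256, &conf);
}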
2056
2057 /**
2058 * Get a Rx queue.
2059 *
2060 * @param dev
2061 * Pointer to Ethernet device.
2062 * @param idx
2063 * RX queue index.
2064 *
2065 * @return
2066 * A pointer to the queue if it exists, NULL otherwise.
2067 */
2068 struct mlx5_rxq_ctrl *
2069 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2070 {
2071 struct mlx5_priv *priv = dev->data->dev_private;
2072 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2073
2074 if ((*priv->rxqs)[idx]) {
2075 rxq_ctrl = container_of((*priv->rxqs)[idx],
2076 struct mlx5_rxq_ctrl,
2077 rxq);
2078 mlx5_rxq_obj_get(dev, idx);
2079 rte_atomic32_inc(&rxq_ctrl->refcnt);
2080 }
2081 return rxq_ctrl;
2082 }
2083
2084 /**
2085 * Release a Rx queue.
2086 *
2087 * @param dev
2088 * Pointer to Ethernet device.
2089 * @param idx
2090 * RX queue index.
2091 *
2092 * @return
2093 * 1 while a reference on it exists, 0 when freed.
2094 */
2095 int
2096 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2097 {
2098 struct mlx5_priv *priv = dev->data->dev_private;
2099 struct mlx5_rxq_ctrl *rxq_ctrl;
2100
2101 if (!(*priv->rxqs)[idx])
2102 return 0;
2103 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2104 MLX5_ASSERT(rxq_ctrl->priv);
2105 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2106 rxq_ctrl->obj = NULL;
2107 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2108 if (rxq_ctrl->dbr_umem_id_valid)
2109 claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
2110 rxq_ctrl->dbr_offset));
2111 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2112 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2113 LIST_REMOVE(rxq_ctrl, next);
2114 rte_free(rxq_ctrl);
2115 (*priv->rxqs)[idx] = NULL;
2116 return 0;
2117 }
2118 return 1;
2119 }
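
/*
 * Illustrative sketch (not part of the driver): the reference-counting
 * contract of mlx5_rxq_get()/mlx5_rxq_release().  Every successful get must
 * be balanced by a release; the control structure is only freed once the
 * last reference is dropped.  'dev' is assumed to be a configured mlx5 port.
 */
static void
rxq_refcnt_example(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, idx);

	if (rxq_ctrl == NULL)
		return; /* Queue 'idx' was never configured. */
	/* ... the queue data can be used safely here ... */
	(void)mlx5_rxq_release(dev, idx); /* 1 while still referenced, 0 when freed. */
}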
2120
2121 /**
2122 * Verify the Rx Queue list is empty.
2123 *
2124 * @param dev
2125 * Pointer to Ethernet device.
2126 *
2127 * @return
2128 * The number of objects not released.
2129 */
2130 int
2131 mlx5_rxq_verify(struct rte_eth_dev *dev)
2132 {
2133 struct mlx5_priv *priv = dev->data->dev_private;
2134 struct mlx5_rxq_ctrl *rxq_ctrl;
2135 int ret = 0;
2136
2137 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2138 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2139 dev->data->port_id, rxq_ctrl->rxq.idx);
2140 ++ret;
2141 }
2142 return ret;
2143 }
2144
2145 /**
2146 * Get a Rx queue type.
2147 *
2148 * @param dev
2149 * Pointer to Ethernet device.
2150 * @param idx
2151 * Rx queue index.
2152 *
2153 * @return
2154 * The Rx queue type.
2155 */
2156 enum mlx5_rxq_type
2157 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2158 {
2159 struct mlx5_priv *priv = dev->data->dev_private;
2160 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2161
2162 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2163 rxq_ctrl = container_of((*priv->rxqs)[idx],
2164 struct mlx5_rxq_ctrl,
2165 rxq);
2166 return rxq_ctrl->type;
2167 }
2168 return MLX5_RXQ_TYPE_UNDEFINED;
2169 }
2170
2171 /**
2172 * Create an indirection table.
2173 *
2174 * @param dev
2175 * Pointer to Ethernet device.
2176 * @param queues
2177 * Queues entering the indirection table.
2178 * @param queues_n
2179 * Number of queues in the array.
2180 *
2181 * @return
2182 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2183 */
2184 static struct mlx5_ind_table_obj *
2185 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2186 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2187 {
2188 struct mlx5_priv *priv = dev->data->dev_private;
2189 struct mlx5_ind_table_obj *ind_tbl;
2190 unsigned int i = 0, j = 0, k = 0;
2191
2192 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
2193 queues_n * sizeof(uint16_t), 0);
2194 if (!ind_tbl) {
2195 rte_errno = ENOMEM;
2196 return NULL;
2197 }
2198 ind_tbl->type = type;
2199 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2200 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2201 log2above(queues_n) :
2202 log2above(priv->config.ind_table_max_size);
2203 struct ibv_wq *wq[1 << wq_n];
2204
2205 for (i = 0; i != queues_n; ++i) {
2206 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2207 queues[i]);
2208 if (!rxq)
2209 goto error;
2210 wq[i] = rxq->obj->wq;
2211 ind_tbl->queues[i] = queues[i];
2212 }
2213 ind_tbl->queues_n = queues_n;
2214 /* Finalise indirection table. */
2215 k = i; /* Retain value of i for use in error case. */
2216 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2217 wq[k] = wq[j];
2218 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2219 (priv->sh->ctx,
2220 &(struct ibv_rwq_ind_table_init_attr){
2221 .log_ind_tbl_size = wq_n,
2222 .ind_tbl = wq,
2223 .comp_mask = 0,
2224 });
2225 if (!ind_tbl->ind_table) {
2226 rte_errno = errno;
2227 goto error;
2228 }
2229 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2230 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2231 const unsigned int rqt_n =
2232 1 << (rte_is_power_of_2(queues_n) ?
2233 log2above(queues_n) :
2234 log2above(priv->config.ind_table_max_size));
2235
2236 rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
2237 rqt_n * sizeof(uint32_t), 0);
2238 if (!rqt_attr) {
2239 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2240 dev->data->port_id);
2241 rte_errno = ENOMEM;
2242 goto error;
2243 }
2244 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2245 rqt_attr->rqt_actual_size = rqt_n;
2246 for (i = 0; i != queues_n; ++i) {
2247 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2248 queues[i]);
2249 if (!rxq)
2250 goto error;
2251 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2252 ind_tbl->queues[i] = queues[i];
2253 }
2254 k = i; /* Retain value of i for use in error case. */
2255 for (j = 0; k != rqt_n; ++k, ++j)
2256 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2257 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2258 rqt_attr);
2259 rte_free(rqt_attr);
2260 if (!ind_tbl->rqt) {
2261 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2262 dev->data->port_id);
2263 rte_errno = errno;
2264 goto error;
2265 }
2266 ind_tbl->queues_n = queues_n;
2267 }
2268 rte_atomic32_inc(&ind_tbl->refcnt);
2269 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2270 return ind_tbl;
2271 error:
2272 for (j = 0; j < i; j++)
2273 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2274 rte_free(ind_tbl);
2275 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2276 return NULL;
2277 }
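
/*
 * Illustrative sketch (not part of the driver): the wrap-around fill used
 * above to pad the indirection table to a power-of-two size.  With three
 * queues and a four-entry table the result is {q0, q1, q2, q0}.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int queues[] = {0, 1, 2};
	unsigned int tbl[4];
	unsigned int queues_n = 3, tbl_n = 4;
	unsigned int i, j, k;

	for (i = 0; i != queues_n; ++i)
		tbl[i] = queues[i];
	k = i; /* Retain value of i, as in the driver code. */
	for (j = 0; k != tbl_n; ++k, ++j)
		tbl[k] = tbl[j];
	for (i = 0; i != tbl_n; ++i)
		printf("entry %u -> queue %u\n", i, tbl[i]);
	return 0;
}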
2278
2279 /**
2280 * Get an indirection table.
2281 *
2282 * @param dev
2283 * Pointer to Ethernet device.
2284 * @param queues
2285 * Queues entering the indirection table.
2286 * @param queues_n
2287 * Number of queues in the array.
2288 *
2289 * @return
2290 * An indirection table if found.
2291 */
2292 static struct mlx5_ind_table_obj *
2293 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2294 uint32_t queues_n)
2295 {
2296 struct mlx5_priv *priv = dev->data->dev_private;
2297 struct mlx5_ind_table_obj *ind_tbl;
2298
2299 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2300 if ((ind_tbl->queues_n == queues_n) &&
2301 (memcmp(ind_tbl->queues, queues,
2302 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2303 == 0))
2304 break;
2305 }
2306 if (ind_tbl) {
2307 unsigned int i;
2308
2309 rte_atomic32_inc(&ind_tbl->refcnt);
2310 for (i = 0; i != ind_tbl->queues_n; ++i)
2311 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2312 }
2313 return ind_tbl;
2314 }
2315
2316 /**
2317 * Release an indirection table.
2318 *
2319 * @param dev
2320 * Pointer to Ethernet device.
2321 * @param ind_table
2322 * Indirection table to release.
2323 *
2324 * @return
2325 * 1 while a reference on it exists, 0 when freed.
2326 */
2327 static int
2328 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2329 struct mlx5_ind_table_obj *ind_tbl)
2330 {
2331 unsigned int i;
2332
2333 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2334 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2335 claim_zero(mlx5_glue->destroy_rwq_ind_table
2336 (ind_tbl->ind_table));
2337 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2338 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2339 }
2340 for (i = 0; i != ind_tbl->queues_n; ++i)
2341 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2342 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2343 LIST_REMOVE(ind_tbl, next);
2344 rte_free(ind_tbl);
2345 return 0;
2346 }
2347 return 1;
2348 }
2349
2350 /**
2351 * Verify the indirection table object list is empty.
2352 *
2353 * @param dev
2354 * Pointer to Ethernet device.
2355 *
2356 * @return
2357 * The number of objects not released.
2358 */
2359 int
2360 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2361 {
2362 struct mlx5_priv *priv = dev->data->dev_private;
2363 struct mlx5_ind_table_obj *ind_tbl;
2364 int ret = 0;
2365
2366 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2367 DRV_LOG(DEBUG,
2368 "port %u indirection table obj %p still referenced",
2369 dev->data->port_id, (void *)ind_tbl);
2370 ++ret;
2371 }
2372 return ret;
2373 }
2374
2375 /**
2376 * Create an Rx Hash queue.
2377 *
2378 * @param dev
2379 * Pointer to Ethernet device.
2380 * @param rss_key
2381 * RSS key for the Rx hash queue.
2382 * @param rss_key_len
2383 * RSS key length.
2384 * @param hash_fields
2385 * Verbs protocol hash field to make the RSS on.
2386 * @param queues
2387 * Queues entering the hash queue. In case of empty hash_fields only the
2388 * first queue index will be taken for the indirection table.
2389 * @param queues_n
2390 * Number of queues.
2391 * @param tunnel
2392 * Tunnel type.
2393 *
2394 * @return
2395 * The index of the created Verbs/DevX hash Rx queue on success, 0 otherwise and rte_errno is set.
2396 */
2397 uint32_t
2398 mlx5_hrxq_new(struct rte_eth_dev *dev,
2399 const uint8_t *rss_key, uint32_t rss_key_len,
2400 uint64_t hash_fields,
2401 const uint16_t *queues, uint32_t queues_n,
2402 int tunnel __rte_unused)
2403 {
2404 struct mlx5_priv *priv = dev->data->dev_private;
2405 struct mlx5_hrxq *hrxq;
2406 uint32_t hrxq_idx = 0;
2407 struct ibv_qp *qp = NULL;
2408 struct mlx5_ind_table_obj *ind_tbl;
2409 int err;
2410 struct mlx5_devx_obj *tir = NULL;
2411 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2412 struct mlx5_rxq_ctrl *rxq_ctrl =
2413 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2414
2415 queues_n = hash_fields ? queues_n : 1;
2416 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2417 if (!ind_tbl) {
2418 enum mlx5_ind_tbl_type type;
2419
2420 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2421 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2422 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2423 }
2424 if (!ind_tbl) {
2425 rte_errno = ENOMEM;
2426 return 0;
2427 }
2428 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2429 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2430 struct mlx5dv_qp_init_attr qp_init_attr;
2431
2432 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2433 if (tunnel) {
2434 qp_init_attr.comp_mask =
2435 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2436 qp_init_attr.create_flags =
2437 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2438 }
2439 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2440 if (dev->data->dev_conf.lpbk_mode) {
2441 /*
2442 * Allow packets sent from NIC loopback
2443 * without source MAC check.
2444 */
2445 qp_init_attr.comp_mask |=
2446 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2447 qp_init_attr.create_flags |=
2448 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2449 }
2450 #endif
2451 qp = mlx5_glue->dv_create_qp
2452 (priv->sh->ctx,
2453 &(struct ibv_qp_init_attr_ex){
2454 .qp_type = IBV_QPT_RAW_PACKET,
2455 .comp_mask =
2456 IBV_QP_INIT_ATTR_PD |
2457 IBV_QP_INIT_ATTR_IND_TABLE |
2458 IBV_QP_INIT_ATTR_RX_HASH,
2459 .rx_hash_conf = (struct ibv_rx_hash_conf){
2460 .rx_hash_function =
2461 IBV_RX_HASH_FUNC_TOEPLITZ,
2462 .rx_hash_key_len = rss_key_len,
2463 .rx_hash_key =
2464 (void *)(uintptr_t)rss_key,
2465 .rx_hash_fields_mask = hash_fields,
2466 },
2467 .rwq_ind_tbl = ind_tbl->ind_table,
2468 .pd = priv->sh->pd,
2469 },
2470 &qp_init_attr);
2471 #else
2472 qp = mlx5_glue->create_qp_ex
2473 (priv->sh->ctx,
2474 &(struct ibv_qp_init_attr_ex){
2475 .qp_type = IBV_QPT_RAW_PACKET,
2476 .comp_mask =
2477 IBV_QP_INIT_ATTR_PD |
2478 IBV_QP_INIT_ATTR_IND_TABLE |
2479 IBV_QP_INIT_ATTR_RX_HASH,
2480 .rx_hash_conf = (struct ibv_rx_hash_conf){
2481 .rx_hash_function =
2482 IBV_RX_HASH_FUNC_TOEPLITZ,
2483 .rx_hash_key_len = rss_key_len,
2484 .rx_hash_key =
2485 (void *)(uintptr_t)rss_key,
2486 .rx_hash_fields_mask = hash_fields,
2487 },
2488 .rwq_ind_tbl = ind_tbl->ind_table,
2489 .pd = priv->sh->pd,
2490 });
2491 #endif
2492 if (!qp) {
2493 rte_errno = errno;
2494 goto error;
2495 }
2496 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2497 struct mlx5_devx_tir_attr tir_attr;
2498 uint32_t i;
2499 uint32_t lro = 1;
2500
2501 /* Enable TIR LRO only if all the queues were configured for it. */
2502 for (i = 0; i < queues_n; ++i) {
2503 if (!(*priv->rxqs)[queues[i]]->lro) {
2504 lro = 0;
2505 break;
2506 }
2507 }
2508 memset(&tir_attr, 0, sizeof(tir_attr));
2509 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2510 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2511 tir_attr.tunneled_offload_en = !!tunnel;
2512 /* If needed, translate hash_fields bitmap to PRM format. */
2513 if (hash_fields) {
2514 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2515 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2516 hash_fields & IBV_RX_HASH_INNER ?
2517 &tir_attr.rx_hash_field_selector_inner :
2518 &tir_attr.rx_hash_field_selector_outer;
2519 #else
2520 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2521 &tir_attr.rx_hash_field_selector_outer;
2522 #endif
2523
2524 /* 1 bit: 0: IPv4, 1: IPv6. */
2525 rx_hash_field_select->l3_prot_type =
2526 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2527 /* 1 bit: 0: TCP, 1: UDP. */
2528 rx_hash_field_select->l4_prot_type =
2529 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2530 /* Bitmask which sets which fields to use in RX Hash. */
2531 rx_hash_field_select->selected_fields =
2532 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2533 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2534 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2535 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
2536 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2537 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
2538 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2539 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
2540 }
2541 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2542 tir_attr.transport_domain = priv->sh->td->id;
2543 else
2544 tir_attr.transport_domain = priv->sh->tdn;
2545 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
2546 MLX5_RSS_HASH_KEY_LEN);
2547 tir_attr.indirect_table = ind_tbl->rqt->id;
2548 if (dev->data->dev_conf.lpbk_mode)
2549 tir_attr.self_lb_block =
2550 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2551 if (lro) {
2552 tir_attr.lro_timeout_period_usecs =
2553 priv->config.lro.timeout;
2554 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2555 tir_attr.lro_enable_mask =
2556 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2557 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2558 }
2559 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2560 if (!tir) {
2561 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2562 dev->data->port_id);
2563 rte_errno = errno;
2564 goto error;
2565 }
2566 }
2567 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2568 if (!hrxq)
2569 goto error;
2570 hrxq->ind_table = ind_tbl;
2571 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2572 hrxq->qp = qp;
2573 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2574 hrxq->action =
2575 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2576 if (!hrxq->action) {
2577 rte_errno = errno;
2578 goto error;
2579 }
2580 #endif
2581 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2582 hrxq->tir = tir;
2583 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2584 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2585 (hrxq->tir->obj);
2586 if (!hrxq->action) {
2587 rte_errno = errno;
2588 goto error;
2589 }
2590 #endif
2591 }
2592 hrxq->rss_key_len = rss_key_len;
2593 hrxq->hash_fields = hash_fields;
2594 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2595 rte_atomic32_inc(&hrxq->refcnt);
2596 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
2597 hrxq, next);
2598 return hrxq_idx;
2599 error:
2600 err = rte_errno; /* Save rte_errno before cleanup. */
2601 mlx5_ind_table_obj_release(dev, ind_tbl);
2602 if (qp)
2603 claim_zero(mlx5_glue->destroy_qp(qp));
2604 else if (tir)
2605 claim_zero(mlx5_devx_cmd_destroy(tir));
2606 rte_errno = err; /* Restore rte_errno. */
2607 return 0;
2608 }
2609
2610 /**
2611 * Get an Rx Hash queue.
2612 *
2613 * @param dev
2614 * Pointer to Ethernet device.
2615 * @param rss_conf
2616 * RSS configuration for the Rx hash queue.
2617 * @param queues
2618 * Queues entering the hash queue. In case of empty hash_fields only the
2619 * first queue index will be taken for the indirection table.
2620 * @param queues_n
2621 * Number of queues.
2622 *
2623 * @return
2624 * A hash Rx queue index on success, 0 otherwise.
2625 */
2626 uint32_t
2627 mlx5_hrxq_get(struct rte_eth_dev *dev,
2628 const uint8_t *rss_key, uint32_t rss_key_len,
2629 uint64_t hash_fields,
2630 const uint16_t *queues, uint32_t queues_n)
2631 {
2632 struct mlx5_priv *priv = dev->data->dev_private;
2633 struct mlx5_hrxq *hrxq;
2634 uint32_t idx;
2635
2636 queues_n = hash_fields ? queues_n : 1;
2637 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2638 hrxq, next) {
2639 struct mlx5_ind_table_obj *ind_tbl;
2640
2641 if (hrxq->rss_key_len != rss_key_len)
2642 continue;
2643 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2644 continue;
2645 if (hrxq->hash_fields != hash_fields)
2646 continue;
2647 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2648 if (!ind_tbl)
2649 continue;
2650 if (ind_tbl != hrxq->ind_table) {
2651 mlx5_ind_table_obj_release(dev, ind_tbl);
2652 continue;
2653 }
2654 rte_atomic32_inc(&hrxq->refcnt);
2655 return idx;
2656 }
2657 return 0;
2658 }
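
/*
 * Illustrative sketch (not part of the driver): the lookup-then-create
 * pattern used by callers of mlx5_hrxq_get()/mlx5_hrxq_new().  'dev' and
 * 'queues' are assumed to come from an already configured port and
 * rss_hash_default_key is the key defined at the top of this file.
 */
static uint32_t
hrxq_get_or_create_example(struct rte_eth_dev *dev,
			   const uint16_t *queues, uint32_t queues_n,
			   uint64_t hash_fields)
{
	uint32_t hrxq_idx;

	hrxq_idx = mlx5_hrxq_get(dev, rss_hash_default_key,
				 MLX5_RSS_HASH_KEY_LEN, hash_fields,
				 queues, queues_n);
	if (!hrxq_idx)
		hrxq_idx = mlx5_hrxq_new(dev, rss_hash_default_key,
					 MLX5_RSS_HASH_KEY_LEN, hash_fields,
					 queues, queues_n, 0);
	return hrxq_idx; /* 0 means failure and rte_errno is set. */
}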
2659
2660 /**
2661 * Release the hash Rx queue.
2662 *
2663 * @param dev
2664 * Pointer to Ethernet device.
2665 * @param hrxq
2666 * Index to Hash Rx queue to release.
2667 *
2668 * @return
2669 * 1 while a reference on it exists, 0 when freed.
2670 */
2671 int
2672 mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2673 {
2674 struct mlx5_priv *priv = dev->data->dev_private;
2675 struct mlx5_hrxq *hrxq;
2676
2677 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2678 if (!hrxq)
2679 return 0;
2680 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2681 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2682 mlx5_glue->destroy_flow_action(hrxq->action);
2683 #endif
2684 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2685 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2686 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2687 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2688 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2689 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
2690 hrxq_idx, hrxq, next);
2691 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2692 return 0;
2693 }
2694 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2695 return 1;
2696 }
2697
2698 /**
2699 * Verify the hash Rx queue list is empty.
2700 *
2701 * @param dev
2702 * Pointer to Ethernet device.
2703 *
2704 * @return
2705 * The number of objects not released.
2706 */
2707 int
2708 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2709 {
2710 struct mlx5_priv *priv = dev->data->dev_private;
2711 struct mlx5_hrxq *hrxq;
2712 uint32_t idx;
2713 int ret = 0;
2714
2715 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
2716 hrxq, next) {
2717 DRV_LOG(DEBUG,
2718 "port %u hash Rx queue %p still referenced",
2719 dev->data->port_id, (void *)hrxq);
2720 ++ret;
2721 }
2722 return ret;
2723 }
2724
2725 /**
2726 * Create a drop Rx queue Verbs/DevX object.
2727 *
2728 * @param dev
2729 * Pointer to Ethernet device.
2730 *
2731 * @return
2732 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2733 */
2734 static struct mlx5_rxq_obj *
2735 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2736 {
2737 struct mlx5_priv *priv = dev->data->dev_private;
2738 struct ibv_context *ctx = priv->sh->ctx;
2739 struct ibv_cq *cq;
2740 struct ibv_wq *wq = NULL;
2741 struct mlx5_rxq_obj *rxq;
2742
2743 if (priv->drop_queue.rxq)
2744 return priv->drop_queue.rxq;
2745 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2746 if (!cq) {
2747 DEBUG("port %u cannot allocate CQ for drop queue",
2748 dev->data->port_id);
2749 rte_errno = errno;
2750 goto error;
2751 }
2752 wq = mlx5_glue->create_wq(ctx,
2753 &(struct ibv_wq_init_attr){
2754 .wq_type = IBV_WQT_RQ,
2755 .max_wr = 1,
2756 .max_sge = 1,
2757 .pd = priv->sh->pd,
2758 .cq = cq,
2759 });
2760 if (!wq) {
2761 DEBUG("port %u cannot allocate WQ for drop queue",
2762 dev->data->port_id);
2763 rte_errno = errno;
2764 goto error;
2765 }
2766 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2767 if (!rxq) {
2768 DEBUG("port %u cannot allocate drop Rx queue memory",
2769 dev->data->port_id);
2770 rte_errno = ENOMEM;
2771 goto error;
2772 }
2773 rxq->cq = cq;
2774 rxq->wq = wq;
2775 priv->drop_queue.rxq = rxq;
2776 return rxq;
2777 error:
2778 if (wq)
2779 claim_zero(mlx5_glue->destroy_wq(wq));
2780 if (cq)
2781 claim_zero(mlx5_glue->destroy_cq(cq));
2782 return NULL;
2783 }
2784
2785 /**
2786 * Release a drop Rx queue Verbs/DevX object.
2787 *
2788 * @param dev
2789 * Pointer to Ethernet device.
2790 *
2791 * The drop queue Verbs/DevX resources referenced by priv->drop_queue.rxq
2792 * are destroyed and the pointer is reset to NULL.
2793 */
2794 static void
2795 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2796 {
2797 struct mlx5_priv *priv = dev->data->dev_private;
2798 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2799
2800 if (rxq->wq)
2801 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2802 if (rxq->cq)
2803 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2804 rte_free(rxq);
2805 priv->drop_queue.rxq = NULL;
2806 }
2807
2808 /**
2809 * Create a drop indirection table.
2810 *
2811 * @param dev
2812 * Pointer to Ethernet device.
2813 *
2814 * @return
2815 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2816 */
2817 static struct mlx5_ind_table_obj *
2818 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2819 {
2820 struct mlx5_priv *priv = dev->data->dev_private;
2821 struct mlx5_ind_table_obj *ind_tbl;
2822 struct mlx5_rxq_obj *rxq;
2823 struct mlx5_ind_table_obj tmpl;
2824
2825 rxq = mlx5_rxq_obj_drop_new(dev);
2826 if (!rxq)
2827 return NULL;
2828 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2829 (priv->sh->ctx,
2830 &(struct ibv_rwq_ind_table_init_attr){
2831 .log_ind_tbl_size = 0,
2832 .ind_tbl = &rxq->wq,
2833 .comp_mask = 0,
2834 });
2835 if (!tmpl.ind_table) {
2836 DEBUG("port %u cannot allocate indirection table for drop"
2837 " queue",
2838 dev->data->port_id);
2839 rte_errno = errno;
2840 goto error;
2841 }
2842 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2843 if (!ind_tbl) {
2844 rte_errno = ENOMEM;
2845 goto error;
2846 }
2847 ind_tbl->ind_table = tmpl.ind_table;
2848 return ind_tbl;
2849 error:
2850 mlx5_rxq_obj_drop_release(dev);
2851 return NULL;
2852 }
2853
2854 /**
2855 * Release a drop indirection table.
2856 *
2857 * @param dev
2858 * Pointer to Ethernet device.
2859 */
2860 static void
2861 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2862 {
2863 struct mlx5_priv *priv = dev->data->dev_private;
2864 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2865
2866 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2867 mlx5_rxq_obj_drop_release(dev);
2868 rte_free(ind_tbl);
2869 priv->drop_queue.hrxq->ind_table = NULL;
2870 }
2871
2872 /**
2873 * Create a drop Rx Hash queue.
2874 *
2875 * @param dev
2876 * Pointer to Ethernet device.
2877 *
2878 * @return
2879 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2880 */
2881 struct mlx5_hrxq *
2882 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2883 {
2884 struct mlx5_priv *priv = dev->data->dev_private;
2885 struct mlx5_ind_table_obj *ind_tbl = NULL;
2886 struct ibv_qp *qp = NULL;
2887 struct mlx5_hrxq *hrxq = NULL;
2888
2889 if (priv->drop_queue.hrxq) {
2890 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2891 return priv->drop_queue.hrxq;
2892 }
2893 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2894 if (!hrxq) {
2895 DRV_LOG(WARNING,
2896 "port %u cannot allocate memory for drop queue",
2897 dev->data->port_id);
2898 rte_errno = ENOMEM;
2899 goto error;
2900 }
2901 priv->drop_queue.hrxq = hrxq;
2902 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2903 if (!ind_tbl)
2904 goto error;
2905 hrxq->ind_table = ind_tbl;
2906 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2907 &(struct ibv_qp_init_attr_ex){
2908 .qp_type = IBV_QPT_RAW_PACKET,
2909 .comp_mask =
2910 IBV_QP_INIT_ATTR_PD |
2911 IBV_QP_INIT_ATTR_IND_TABLE |
2912 IBV_QP_INIT_ATTR_RX_HASH,
2913 .rx_hash_conf = (struct ibv_rx_hash_conf){
2914 .rx_hash_function =
2915 IBV_RX_HASH_FUNC_TOEPLITZ,
2916 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2917 .rx_hash_key = rss_hash_default_key,
2918 .rx_hash_fields_mask = 0,
2919 },
2920 .rwq_ind_tbl = ind_tbl->ind_table,
2921 .pd = priv->sh->pd
2922 });
2923 if (!qp) {
2924 DEBUG("port %u cannot allocate QP for drop queue",
2925 dev->data->port_id);
2926 rte_errno = errno;
2927 goto error;
2928 }
2929 hrxq->qp = qp;
2930 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2931 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2932 if (!hrxq->action) {
2933 rte_errno = errno;
2934 goto error;
2935 }
2936 #endif
2937 rte_atomic32_set(&hrxq->refcnt, 1);
2938 return hrxq;
2939 error:
2940 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2941 if (hrxq && hrxq->action)
2942 mlx5_glue->destroy_flow_action(hrxq->action);
2943 #endif
2944 if (qp)
2945 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2946 if (ind_tbl)
2947 mlx5_ind_table_obj_drop_release(dev);
2948 if (hrxq) {
2949 priv->drop_queue.hrxq = NULL;
2950 rte_free(hrxq);
2951 }
2952 return NULL;
2953 }
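
/*
 * Illustrative sketch (not part of the driver): the drop hash Rx queue is a
 * per-port singleton.  Each caller that obtains it through
 * mlx5_hrxq_drop_new() takes a reference and must balance it with
 * mlx5_hrxq_drop_release().
 */
static void
drop_hrxq_example(struct rte_eth_dev *dev)
{
	struct mlx5_hrxq *hrxq = mlx5_hrxq_drop_new(dev);

	if (hrxq == NULL)
		return; /* rte_errno is set. */
	/* ... hrxq->qp (or hrxq->action) is attached to a drop flow here ... */
	mlx5_hrxq_drop_release(dev);
}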
2954
2955 /**
2956 * Release a drop hash Rx queue.
2957 *
2958 * @param dev
2959 * Pointer to Ethernet device.
2960 */
2961 void
2962 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2963 {
2964 struct mlx5_priv *priv = dev->data->dev_private;
2965 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2966
2967 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2968 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2969 mlx5_glue->destroy_flow_action(hrxq->action);
2970 #endif
2971 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2972 mlx5_ind_table_obj_drop_release(dev);
2973 rte_free(hrxq);
2974 priv->drop_queue.hrxq = NULL;
2975 }
2976 }