/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>

#include "base/iavf_prototype.h"
#include "base/iavf_type.h"

#include "iavf_rxtx.h"
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
	/* The following constraints must be satisfied:
	 *   thresh < rxq->nb_rx_desc
	 */
	if (thresh >= nb_desc) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
		uint16_t tx_free_thresh)
	/* TX descriptors will have their RS bit set after tx_rs_thresh
	 * descriptors have been used. The TX descriptor ring will be cleaned
	 * after tx_free_thresh descriptors are used or if the number of
	 * descriptors required to transmit a packet is greater than the
	 * number of free TX descriptors.
	 *
	 * The following constraints must be satisfied:
	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
	 *  - tx_free_thresh must be less than the size of the ring minus 3.
	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  - tx_rs_thresh must be a divisor of the ring size.
	 *
	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
	 * race condition, hence the maximum threshold constraints. When set
	 * to zero use default values.
	 */
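	/* Worked example (illustrative, not from the driver): with a ring of
	 * nb_desc = 512 descriptors, tx_rs_thresh = 32 and tx_free_thresh = 64
	 * satisfy all four constraints: 32 < 510, 64 < 509, 32 <= 64 and
	 * 512 % 32 == 0.
	 */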
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 2",
			     tx_rs_thresh, nb_desc);
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 3.",
			     tx_free_thresh, nb_desc);
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
			     "equal to tx_free_thresh (%u).",
			     tx_rs_thresh, tx_free_thresh);
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
			     "number of TX descriptors (%u).",
			     tx_rs_thresh, nb_desc);

#ifdef RTE_LIBRTE_IAVF_INC_VECTOR
check_rx_vec_allow(struct iavf_rx_queue *rxq)
	if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
	    rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
		PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");

	PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");

check_tx_vec_allow(struct iavf_tx_queue *txq)
	if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
	    txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
	    txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
		PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");

	PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");

check_rx_bulk_allow(struct iavf_rx_queue *rxq)
	if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "IAVF_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
reset_rx_queue(struct iavf_rx_queue *rxq)
	len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < IAVF_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;

reset_tx_queue(struct iavf_tx_queue *txq)
	struct iavf_tx_entry *txe;
	uint16_t i, prev, size;

	PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");

	size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[prev].next_id = i;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_free = txq->nb_tx_desc - 1;

	txq->next_dd = txq->rs_thresh - 1;
	txq->next_rs = txq->rs_thresh - 1;
alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
	volatile union iavf_rx_desc *rxd;
	struct rte_mbuf *mbuf = NULL;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC

		rxq->sw_ring[i] = mbuf;

release_rxq_mbufs(struct iavf_rx_queue *rxq)
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;

	if (rxq->rx_nb_avail == 0)

	for (i = 0; i < rxq->rx_nb_avail; i++) {
		struct rte_mbuf *mbuf;

		mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
		rte_pktmbuf_free_seg(mbuf);
	rxq->rx_nb_avail = 0;
release_txq_mbufs(struct iavf_tx_queue *txq)
	if (!txq || !txq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");

	for (i = 0; i < txq->nb_tx_desc; i++) {
		if (txq->sw_ring[i].mbuf) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
static const struct iavf_rxq_ops def_rxq_ops = {
	.release_mbufs = release_rxq_mbufs,
};

static const struct iavf_txq_ops def_txq_ops = {
	.release_mbufs = release_txq_mbufs,
};

iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_rx_queue *rxq;
	const struct rte_memzone *mz;
	uint16_t rx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > IAVF_MAX_RING_DESC ||
	    nb_desc < IAVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "

	/* Check free threshold */
	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
			 IAVF_DEFAULT_RX_FREE_THRESH :
			 rx_conf->rx_free_thresh;
	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)

	/* Free memory if needed */
	if (dev->data->rx_queues[queue_idx]) {
		iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;

	/* Allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("iavf rxq",
				 sizeof(struct iavf_rx_queue),
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "rx queue data structure");

	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = 0; /* crc stripping by default */
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;

	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
	/* Allocate the software ring. */
	len = nb_desc + IAVF_RX_MAX_BURST;
		rte_zmalloc_socket("iavf rx sw ring",
				   sizeof(struct rte_mbuf *) * len,
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");

	/* Allocate the maximum number of RX ring hardware descriptors with
	 * a little more to support bulk allocation.
	 */
	len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
	ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      ring_size, IAVF_RING_BASE_ALIGN,
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
		rte_free(rxq->sw_ring);
	/* Zero all the descriptors in the ring. */
	memset(mz->addr, 0, ring_size);
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->rx_ring = (union iavf_rx_desc *)mz->addr;

	dev->data->rx_queues[queue_idx] = rxq;
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	rxq->ops = &def_rxq_ops;

	if (check_rx_bulk_allow(rxq) == TRUE) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "satisfied. Rx Burst Bulk Alloc function will be "
			     "used on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "not satisfied, Scattered Rx is requested "
			     "on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
		ad->rx_bulk_alloc_allowed = false;

#ifdef RTE_LIBRTE_IAVF_INC_VECTOR
	if (check_rx_vec_allow(rxq) == FALSE)
		ad->rx_vec_allowed = false;

iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_tx_queue *txq;
	const struct rte_memzone *mz;
	uint16_t tx_rs_thresh, tx_free_thresh;

	PMD_INIT_FUNC_TRACE();
	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > IAVF_MAX_RING_DESC ||
	    nb_desc < IAVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "

	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
	/* Free memory if needed. */
	if (dev->data->tx_queues[queue_idx]) {
		iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket("iavf txq",
				 sizeof(struct iavf_tx_queue),
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "tx queue structure");

	txq->nb_tx_desc = nb_desc;
	txq->rs_thresh = tx_rs_thresh;
	txq->free_thresh = tx_free_thresh;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	/* Allocate software ring */
		rte_zmalloc_socket("iavf tx sw ring",
				   sizeof(struct iavf_tx_entry) * nb_desc,
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");

	/* Allocate TX hardware ring descriptors. */
	ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
	ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      ring_size, IAVF_RING_BASE_ALIGN,
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
		rte_free(txq->sw_ring);

	txq->tx_ring_phys_addr = mz->iova;
	txq->tx_ring = (struct iavf_tx_desc *)mz->addr;

	dev->data->tx_queues[queue_idx] = txq;
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
	txq->ops = &def_txq_ops;

#ifdef RTE_LIBRTE_IAVF_INC_VECTOR
	if (check_tx_vec_allow(txq) == FALSE) {
		struct iavf_adapter *ad =
			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
		ad->tx_vec_allowed = false;

iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_rx_queue *rxq;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",

	dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;
iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_tx_queue *txq;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",

	dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;
iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_rx_queue *rxq;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)

	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->ops->release_mbufs(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_tx_queue *txq;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)

	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",

	txq = dev->data->tx_queues[tx_queue_id];
	txq->ops->release_mbufs(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

iavf_dev_rx_queue_release(void *rxq)
	struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;

	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);

iavf_dev_tx_queue_release(void *txq)
	struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;

	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);

iavf_stop_queues(struct rte_eth_dev *dev)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_rx_queue *rxq;
	struct iavf_tx_queue *txq;

	/* Stop All queues */
	ret = iavf_disable_queues(adapter);
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->ops->release_mbufs(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->ops->release_mbufs(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
	if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
	    (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
			rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);

/* Translate the rx descriptor status and error fields to pkt flags */
static inline uint64_t
iavf_rxd_to_pkt_flags(uint64_t qword)
	uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);

#define IAVF_RX_ERR_BITS 0x3f

	/* Check if RSS_HASH */
	flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
		  IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
		 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;

	if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
		flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);

	if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
		flags |= PKT_RX_IP_CKSUM_BAD;
		flags |= PKT_RX_IP_CKSUM_GOOD;

	if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
		flags |= PKT_RX_L4_CKSUM_BAD;
		flags |= PKT_RX_L4_CKSUM_GOOD;

	/* TODO: Oversize error bit is not processed here */

/* implement recv_pkts */
iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	volatile union iavf_rx_desc *rx_ring;
	volatile union iavf_rx_desc *rxdp;
	struct iavf_rx_queue *rxq;
	union iavf_rx_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_eth_dev *dev;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	uint16_t rx_packet_len;
	uint16_t rx_id, nb_hold;
	static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
		[1] = RTE_PTYPE_L2_ETHER,
		/* [2] - [21] reserved */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		/* All others reserved */
	};
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
		rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			    IAVF_RXD_QW1_STATUS_SHIFT;

		/* Check the DD bit first */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))

		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);

		rxe = rxq->sw_ring[rx_id];
		if (unlikely(rx_id == rxq->nb_rx_desc))

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);

			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;

		rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));

		rxm->pkt_len = rx_packet_len;
		rxm->data_len = rx_packet_len;
		rxm->port = rxq->port_id;
		iavf_rxd_to_vlan_tci(rxm, &rxd);
		pkt_flags = iavf_rxd_to_pkt_flags(qword1);
			ptype_tbl[(uint8_t)((qword1 &
			IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];

		if (pkt_flags & PKT_RX_RSS_HASH)
				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);

		rxm->ol_flags |= pkt_flags;

		rx_pkts[nb_rx++] = rxm;

	rxq->rx_tail = rx_id;
	/* If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the receive tail register of queue.
	 * Update that register with the value of the last processed RX
	 * descriptor minus 1.
	 */
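	/* Note: writing back the last processed descriptor minus 1 keeps the
	 * tail (RDT) from ever becoming equal to the head (RDH), which the
	 * hardware would interpret as a full ring; the same reasoning is
	 * spelled out in iavf_recv_scattered_pkts() below.
	 */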
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   rxq->port_id, rxq->queue_id,
			   rx_id, nb_hold, nb_rx);
		rx_id = (uint16_t)((rx_id == 0) ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
	rxq->nb_rx_hold = nb_hold;

/* implement recv_scattered_pkts */
iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	struct iavf_rx_queue *rxq = rx_queue;
	union iavf_rx_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
	struct rte_mbuf *nmb, *rxm;
	uint16_t rx_id = rxq->rx_tail;
	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
	struct rte_eth_dev *dev;
	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
	volatile union iavf_rx_desc *rxdp;
	static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
		[1] = RTE_PTYPE_L2_ETHER,
		/* [2] - [21] reserved */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		/* All others reserved */
	};

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
		rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			    IAVF_RXD_QW1_STATUS_SHIFT;

		/* Check the DD bit */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))

		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;

		rxe = rxq->sw_ring[rx_id];
		if (rx_id == rxq->nb_rx_desc)

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);

			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));

		/* Set data buffer address and data length of the mbuf */
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;
		rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
		rxm->data_len = rx_packet_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		/* If this is the first buffer of the received packet, set the
		 * pointer to the first mbuf of the packet and initialize its
		 * context. Otherwise, update the total length and the number
		 * of segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
			first_seg->nb_segs = 1;
			first_seg->pkt_len = rx_packet_len;
			first_seg->pkt_len =
				(uint16_t)(first_seg->pkt_len +
			first_seg->nb_segs++;
			last_seg->next = rxm;

		/* If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
		/* This is the last buffer of the received packet. If the CRC
		 * is not stripped by the hardware:
		 *  - Subtract the CRC length from the total packet length.
		 *  - If the last buffer only contains the whole CRC or a part
		 *    of it, free the mbuf associated to the last buffer. If part
		 *    of the CRC is also contained in the previous mbuf, subtract
		 *    the length of that CRC part from the data length of the
		 *    previous mbuf.
		 */
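		/* Worked example (illustrative): with a 4-byte CRC, if the last
		 * buffer holds rx_packet_len = 2 bytes, those bytes are CRC
		 * only, so the last mbuf is freed and the remaining 2 CRC bytes
		 * are trimmed from the previous segment's data_len.
		 */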
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= ETHER_CRC_LEN;
			if (rx_packet_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len =
					(uint16_t)(last_seg->data_len -
						   (ETHER_CRC_LEN - rx_packet_len));
				last_seg->next = NULL;
				rxm->data_len = (uint16_t)(rx_packet_len -

		first_seg->port = rxq->port_id;
		first_seg->ol_flags = 0;
		iavf_rxd_to_vlan_tci(first_seg, &rxd);
		pkt_flags = iavf_rxd_to_pkt_flags(qword1);
		first_seg->packet_type =
			ptype_tbl[(uint8_t)((qword1 &
			IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];

		if (pkt_flags & PKT_RX_RSS_HASH)
			first_seg->hash.rss =
				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);

		first_seg->ol_flags |= pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
					  first_seg->data_off));
		rx_pkts[nb_rx++] = first_seg;

	/* Record index of the next RX descriptor to probe. */
	rxq->rx_tail = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;
	/* If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register. Update the RDT with the value of the last processed RX
	 * descriptor minus 1, to guarantee that the RDT register is never
	 * equal to the RDH register, which creates a "full" ring situation
	 * from the hardware point of view.
	 */
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			   "nb_hold=%u nb_rx=%u",
			   rxq->port_id, rxq->queue_id,
			   rx_id, nb_hold, nb_rx);
		rx_id = (uint16_t)(rx_id == 0 ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
	rxq->nb_rx_hold = nb_hold;

#define IAVF_LOOK_AHEAD 8
iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxep;
	struct rte_mbuf *mb;
	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
	int32_t i, j, nb_rx = 0;
	static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
		[1] = RTE_PTYPE_L2_ETHER,
		/* [2] - [21] reserved */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		/* All others reserved */
	};

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
		    IAVF_RXD_QW1_STATUS_SHIFT;

	/* Make sure there is at least 1 packet to receive */
	if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))

	/* Scan LOOK_AHEAD descriptors at a time to determine which
	 * descriptors reference packets that are ready to be received.
	 */
	for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
	     rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
		/* Read desc statuses backwards to avoid race condition */
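		/* (Presumably: the hardware writes descriptors back in order,
		 * so scanning the DD bits from the highest index down means
		 * any descriptor observed as done implies the lower-indexed
		 * ones read afterwards are done as well, keeping the count of
		 * completed descriptors consistent.)
		 */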
		for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
			qword1 = rte_le_to_cpu_64(
				rxdp[j].wb.qword1.status_error_len);
			s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			       IAVF_RXD_QW1_STATUS_SHIFT;

		/* Compute how many status bits were set */
		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
			nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);

		/* Translate descriptor info to mbuf parameters */
		for (j = 0; j < nb_dd; j++) {
			IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
					  rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);

			qword1 = rte_le_to_cpu_64
				(rxdp[j].wb.qword1.status_error_len);
			pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				   IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
			mb->data_len = pkt_len;
			mb->pkt_len = pkt_len;
			iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
			pkt_flags = iavf_rxd_to_pkt_flags(qword1);
				ptype_tbl[(uint8_t)((qword1 &
				IAVF_RXD_QW1_PTYPE_MASK) >>
				IAVF_RXD_QW1_PTYPE_SHIFT)];

			if (pkt_flags & PKT_RX_RSS_HASH)
				mb->hash.rss = rte_le_to_cpu_32(
					rxdp[j].wb.qword0.hi_dword.rss);

			mb->ol_flags |= pkt_flags;

		for (j = 0; j < IAVF_LOOK_AHEAD; j++)
			rxq->rx_stage[i + j] = rxep[j];

		if (nb_dd != IAVF_LOOK_AHEAD)

	/* Clear software ring entries */
	for (i = 0; i < nb_rx; i++)
		rxq->sw_ring[rxq->rx_tail + i] = NULL;

static inline uint16_t
iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
			struct rte_mbuf **rx_pkts,
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];

	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

	for (i = 0; i < nb_pkts; i++)
		rx_pkts[i] = stage[i];

	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxep;
	struct rte_mbuf *mb;
	uint16_t alloc_idx, i;

	/* Allocate buffers in bulk */
	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
			       (rxq->rx_free_thresh - 1));
	rxep = &rxq->sw_ring[alloc_idx];
	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
				    rxq->rx_free_thresh);
	if (unlikely(diag != 0)) {
		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");

	rxdp = &rxq->rx_ring[alloc_idx];
	for (i = 0; i < rxq->rx_free_thresh; i++) {
		if (likely(i < (rxq->rx_free_thresh - 1)))
			/* Prefetch next mbuf */
			rte_prefetch0(rxep[i + 1]);

		rte_mbuf_refcnt_set(mb, 1);
		mb->data_off = RTE_PKTMBUF_HEADROOM;
		mb->port = rxq->port_id;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
		rxdp[i].read.hdr_addr = 0;
		rxdp[i].read.pkt_addr = dma_addr;

	/* Update rx tail register */
	IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);

	rxq->rx_free_trigger =
		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;

	if (rxq->rx_nb_avail)
		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
	rxq->rx_next_avail = 0;
	rxq->rx_nb_avail = nb_rx;
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

	if (rxq->rx_tail > rxq->rx_free_trigger) {
		if (iavf_rx_alloc_bufs(rxq) != 0) {
			/* TODO: count rx_mbuf_alloc_failed here */

			rxq->rx_nb_avail = 0;
			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
				rxq->sw_ring[j] = rxq->rx_stage[i];

	if (rxq->rx_tail >= rxq->nb_rx_desc)

	PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
		   rxq->port_id, rxq->queue_id,
		   rxq->rx_tail, nb_rx);

	if (rxq->rx_nb_avail)
		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

iavf_recv_pkts_bulk_alloc(void *rx_queue,
			  struct rte_mbuf **rx_pkts,
	uint16_t nb_rx = 0, n, count;

	if (unlikely(nb_pkts == 0))

	if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

	n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
	count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
	nb_rx = (uint16_t)(nb_rx + count);
	nb_pkts = (uint16_t)(nb_pkts - count);

iavf_xmit_cleanup(struct iavf_tx_queue *txq)
	struct iavf_tx_entry *sw_ring = txq->sw_ring;
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;
	volatile struct iavf_tx_desc *txd = txq->tx_ring;

	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
	     rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
	    rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
		PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
				"(port=%d queue=%d)", desc_to_clean_to,
				txq->port_id, txq->queue_id);

	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -

	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;

	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
iavf_calc_context_desc(uint64_t flags)
	static uint64_t mask = PKT_TX_TCP_SEG;

	return (flags & mask) ? 1 : 0;

iavf_txd_enable_checksum(uint64_t ol_flags,
			 uint32_t *td_offset,
			 union iavf_tx_offload tx_offload)
	*td_offset |= (tx_offload.l2_len >> 1) <<
		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L3 checksum offloads */
	if (ol_flags & PKT_TX_IP_CKSUM) {
		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
		*td_offset |= (tx_offload.l3_len >> 2) <<
			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (ol_flags & PKT_TX_IPV4) {
		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
		*td_offset |= (tx_offload.l3_len >> 2) <<
			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (ol_flags & PKT_TX_IPV6) {
		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
		*td_offset |= (tx_offload.l3_len >> 2) <<
			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;

	if (ol_flags & PKT_TX_TCP_SEG) {
		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (tx_offload.l4_len >> 2) <<
			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_TCP_CKSUM:
		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
	case PKT_TX_SCTP_CKSUM:
		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
	case PKT_TX_UDP_CKSUM:
		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
/* set TSO context descriptor
 * support IP -> L4 and IP -> IP -> L4
 */
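/* Worked example (illustrative): for a TCP/IPv4 TSO packet with l2_len = 14,
 * l3_len = 20, l4_len = 20 and pkt_len = 9014, the header length is
 * hdr_len = 54, so cd_tso_len = 9014 - 54 = 8960 payload bytes are programmed
 * into the context descriptor together with tso_segsz (the MSS).
 */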
static inline uint64_t
iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
	uint64_t ctx_desc = 0;
	uint32_t cd_cmd, hdr_len, cd_tso_len;

	if (!tx_offload.l4_len) {
		PMD_TX_LOG(DEBUG, "L4 length set to 0");

	/* in case of non tunneling packet, the outer_l2_len and
	 * outer_l3_len must be 0.
	 */
	hdr_len = tx_offload.l2_len +

	cd_cmd = IAVF_TX_CTX_DESC_TSO;
	cd_tso_len = mbuf->pkt_len - hdr_len;
	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
		    ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		    ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);

/* Construct the tx flags */
static inline uint64_t
iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
				((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
				((uint64_t)td_offset <<
				 IAVF_TXD_QW1_OFFSET_SHIFT) |
				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
				((uint64_t)td_tag <<
				 IAVF_TXD_QW1_L2TAG1_SHIFT));

iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	volatile struct iavf_tx_desc *txd;
	volatile struct iavf_tx_desc *txr;
	struct iavf_tx_queue *txq;
	struct iavf_tx_entry *sw_ring;
	struct iavf_tx_entry *txe, *txn;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	union iavf_tx_offload tx_offload = {0};

	sw_ring = txq->sw_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	/* Check if the descriptor ring needs to be cleaned. */
	if (txq->nb_free < txq->free_thresh)
		iavf_xmit_cleanup(txq);
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;
		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		ol_flags = tx_pkt->ol_flags;
		tx_offload.l2_len = tx_pkt->l2_len;
		tx_offload.l3_len = tx_pkt->l3_len;
		tx_offload.l4_len = tx_pkt->l4_len;
		tx_offload.tso_segsz = tx_pkt->tso_segsz;

		/* Calculate the number of context descriptors needed. */
		nb_ctx = iavf_calc_context_desc(ol_flags);

		/* The number of descriptors that must be allocated for
		 * a packet equals to the number of the segments of that
		 * packet plus 1 context descriptor if needed.
		 */
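		/* For example, a TSO packet chained across three mbuf segments
		 * needs one context descriptor plus three data descriptors,
		 * so nb_used = 3 + 1 = 4.
		 */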
		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
		tx_last = (uint16_t)(tx_id + nb_used - 1);

		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
			   " tx_first=%u tx_last=%u",
			   txq->port_id, txq->queue_id, tx_id, tx_last);

		if (nb_used > txq->nb_free) {
			if (iavf_xmit_cleanup(txq)) {
			if (unlikely(nb_used > txq->rs_thresh)) {
				while (nb_used > txq->nb_free) {
					if (iavf_xmit_cleanup(txq)) {

		/* Descriptor based VLAN insertion */
		if (ol_flags & PKT_TX_VLAN_PKT) {
			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
			td_tag = tx_pkt->vlan_tci;

		/* According to datasheet, the bit2 is reserved and must be

		/* Enable checksum offloading */
		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
			iavf_txd_enable_checksum(ol_flags, &td_cmd,
						 &td_offset, tx_offload);

			/* Setup TX context descriptor if required */
			uint64_t cd_type_cmd_tso_mss =
				IAVF_TX_DESC_DTYPE_CONTEXT;

			txn = &sw_ring[txe->next_id];
			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
				rte_pktmbuf_free_seg(txe->mbuf);

			if (ol_flags & PKT_TX_TCP_SEG)
				cd_type_cmd_tso_mss |=
					iavf_set_tso_ctx(tx_pkt, tx_offload);

			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
			txe->last_id = tx_last;
			tx_id = txe->next_id;

			txn = &sw_ring[txe->next_id];
				rte_pktmbuf_free_seg(txe->mbuf);

			/* Setup TX Descriptor */
			slen = m_seg->data_len;
			buf_dma_addr = rte_mbuf_data_iova(m_seg);
			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,

			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
			txe->last_id = tx_last;
			tx_id = txe->next_id;
			m_seg = m_seg->next;
		/* The last packet data descriptor needs End Of Packet (EOP) */
		td_cmd |= IAVF_TX_DESC_CMD_EOP;
		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);

		if (txq->nb_used >= txq->rs_thresh) {
			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
				   "%4u (port=%d queue=%d)",
				   tx_last, txq->port_id, txq->queue_id);

			td_cmd |= IAVF_TX_DESC_CMD_RS;

			/* Update txq RS bit counters */

		txd->cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
					 IAVF_TXD_QW1_CMD_SHIFT);
		IAVF_DUMP_TX_DESC(txq, txd, tx_id);

	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   txq->port_id, txq->queue_id, tx_id, nb_tx);

	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
	txq->tx_tail = tx_id;

iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;

	num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
	ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);

/* TX prep functions */
iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	for (i = 0; i < nb_pkts; i++) {
		ol_flags = m->ol_flags;

		/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
		if (!(ol_flags & PKT_TX_TCP_SEG)) {
			if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
				rte_errno = -EINVAL;
		} else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
			   (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
			/* MSS outside the range are considered malicious */
			rte_errno = -EINVAL;

		if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		ret = rte_net_intel_cksum_prepare(m);
/* choose rx function */
iavf_set_rx_function(struct rte_eth_dev *dev)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_rx_queue *rxq;

	if (adapter->rx_vec_allowed) {
		if (dev->data->scattered_rx) {
			PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
				    " (port=%d).", dev->data->port_id);
			dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
			PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
				    " (port=%d).", dev->data->port_id);
			dev->rx_pkt_burst = iavf_recv_pkts_vec;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			iavf_rxq_vec_setup(rxq);
	} else if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
			    dev->data->port_id);
		dev->rx_pkt_burst = iavf_recv_scattered_pkts;
	} else if (adapter->rx_bulk_alloc_allowed) {
		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
			    dev->data->port_id);
		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
			    dev->data->port_id);
		dev->rx_pkt_burst = iavf_recv_pkts;

/* choose tx function */
iavf_set_tx_function(struct rte_eth_dev *dev)
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_tx_queue *txq;

	if (adapter->tx_vec_allowed) {
		PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
			    dev->data->port_id);
		dev->tx_pkt_burst = iavf_xmit_pkts_vec;
		dev->tx_pkt_prepare = NULL;
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			iavf_txq_vec_setup(txq);
		PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
			    dev->data->port_id);
		dev->tx_pkt_burst = iavf_xmit_pkts;
		dev->tx_pkt_prepare = iavf_prep_pkts;
iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
	struct iavf_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mp;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = TRUE;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;

iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
	struct iavf_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_free_thresh = txq->free_thresh;
	qinfo->conf.tx_rs_thresh = txq->rs_thresh;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
/* Get the number of used descriptors of a rx queue */
iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
#define IAVF_RXQ_SCAN_INTERVAL 4
	volatile union iavf_rx_desc *rxdp;
	struct iavf_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	while ((desc < rxq->nb_rx_desc) &&
	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
		 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
	       (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
		/* Check the DD bit of a rx descriptor of each 4 in a group,
		 * to avoid checking too frequently and downgrading performance
		 */
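		/* Note: because the scan advances IAVF_RXQ_SCAN_INTERVAL (4)
		 * descriptors per probe, the returned count is an
		 * approximation with a granularity of 4 descriptors.
		 */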
		desc += IAVF_RXQ_SCAN_INTERVAL;
		rxdp += IAVF_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);

iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
	struct iavf_rx_queue *rxq = rx_queue;
	volatile uint64_t *status;

	if (unlikely(offset >= rxq->nb_rx_desc))

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
	mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
				<< IAVF_RXD_QW1_STATUS_SHIFT);
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
	struct iavf_tx_queue *txq = tx_queue;
	volatile uint64_t *status;
	uint64_t mask, expect;

	if (unlikely(offset >= txq->nb_tx_desc))

	desc = txq->tx_tail + offset;
	/* go to next desc that has the RS bit */
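	/* Example (illustrative): with rs_thresh = 32 and tx_tail + offset
	 * landing on descriptor 40, rounding desc up to the next multiple of
	 * rs_thresh means the DONE check below is performed on descriptor 64,
	 * the next descriptor that carries the RS bit.
	 */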
	desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
	if (desc >= txq->nb_tx_desc) {
		desc -= txq->nb_tx_desc;
		if (desc >= txq->nb_tx_desc)
			desc -= txq->nb_tx_desc;

	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
	expect = rte_cpu_to_le_64(
		IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
	if ((*status & mask) == expect)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;

iavf_recv_pkts_vec(__rte_unused void *rx_queue,
		   __rte_unused struct rte_mbuf **rx_pkts,
		   __rte_unused uint16_t nb_pkts)

iavf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
			     __rte_unused struct rte_mbuf **rx_pkts,
			     __rte_unused uint16_t nb_pkts)

iavf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
			  __rte_unused struct rte_mbuf **tx_pkts,
			  __rte_unused uint16_t nb_pkts)

iavf_rxq_vec_setup(__rte_unused struct iavf_rx_queue *rxq)

iavf_txq_vec_setup(__rte_unused struct iavf_tx_queue *txq)