/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <rte_ethdev_driver.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
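
/*
 * Chain the segments of packets that the vector RX path received across
 * multiple descriptors ("split" packets) into multi-segment mbufs,
 * adjusting pkt_len/data_len and stripping the CRC once per packet, and
 * compact the completed packets back into rx_bufs. Any packet still
 * incomplete at the end of the burst is saved in the queue for the next
 * call. Returns the number of complete packets.
 */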
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
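
/*
 * Check the DD bit of the threshold descriptor and, if set, bulk-free
 * tx_rs_thresh transmitted mbufs back to their mempool, batching
 * same-pool mbufs to amortize the cost of rte_mempool_put_bulk().
 * Returns the number of descriptors freed, or 0 if the hardware has not
 * finished with them yet.
 */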
static __rte_always_inline int
ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
	struct ixgbe_tx_entry_v *txep;
	uint32_t status;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];

	/* check DD bit on threshold descriptor */
	status = txq->tx_ring[txq->tx_next_dd].wb.status;
	if (!(status & IXGBE_ADVTXD_STAT_DD))
		return 0;

	n = txq->tx_rs_thresh;

	/*
	 * first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
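
/* Record the mbufs being transmitted in the software ring entries. */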
static __rte_always_inline void
tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
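
/*
 * Free every mbuf still held in the TX software ring, walking from the
 * oldest in-flight entry up to tx_tail, then clear all entries.
 */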
static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	unsigned int i;
	struct ixgbe_tx_entry_v *txe;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
		return;

	/* release the used mbufs in sw_ring */
	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
	     i != txq->tx_tail;
	     i = (i + 1) & max_desc) {
		txe = &txq->sw_ring_v[i];
		rte_pktmbuf_free_seg(txe->mbuf);
	}
	txq->nb_tx_free = max_desc;

	/* reset tx_entry */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txe = &txq->sw_ring_v[i];
		txe->mbuf = NULL;
	}
}
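
/*
 * Free every mbuf still held in the RX software ring. When no descriptors
 * are pending rearm, the whole ring may hold valid mbufs; otherwise only
 * the range between rx_tail and rxrearm_start does.
 */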
static inline void
_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
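
/*
 * Release the TX software ring. sw_ring_v was advanced by one entry at
 * setup time (see ixgbe_txq_vec_setup_default below), so free from one
 * entry before the stored pointer.
 */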
static inline void
_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
{
	if (txq == NULL)
		return;

	if (txq->sw_ring != NULL) {
		rte_free(txq->sw_ring_v - 1);
		txq->sw_ring_v = NULL;
	}
}
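
/*
 * Reset a TX queue to its post-init state: zero the hardware ring, mark
 * every descriptor as done (DD) so the free path sees it as reclaimable,
 * and reinitialize the queue counters and context cache.
 */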
static inline void
_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
{
	static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
	struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
	uint16_t i;

	/* Zero out HW ring memory */
	for (i = 0; i < txq->nb_tx_desc; i++)
		txq->tx_ring[i] = zeroed_desc;

	/* Initialize SW ring entries */
	for (i = 0; i < txq->nb_tx_desc; i++) {
		volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];

		txd->wb.status = IXGBE_TXD_STAT_DD;
		txe[i].mbuf = NULL;
	}

	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

	txq->tx_tail = 0;
	txq->nb_tx_used = 0;
	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
	txq->ctx_curr = 0;
	memset((void *)&txq->ctx_cache, 0,
		IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
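
/*
 * Build the 64-bit mbuf initializer used by the vector RX path: a
 * template of the rearm_data region (data_off, refcnt, nb_segs, port)
 * captured from a locally initialized mbuf, so rearm can write all four
 * fields with a single 64-bit store.
 */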
static inline int
ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
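
/* Attach the vector TX ops and reserve the first sw_ring_v entry. */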
static inline int
ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
			    const struct ixgbe_txq_ops *txq_ops)
{
	if (txq->sw_ring_v == NULL)
		return -1;

	/* leave the first one for overflow */
	txq->sw_ring_v = txq->sw_ring_v + 1;
	txq->ops = txq_ops;

	return 0;
}
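
/*
 * Check whether the device configuration allows the vector RX path:
 * it cannot be used together with IEEE1588 timestamping or flow director.
 */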
static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */