/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
static inline uint16_t
reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
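
/*
 * Free transmitted mbufs from the S/W ring once the hardware reports
 * completion (DD set on the threshold descriptor). Mbufs from the same
 * mempool are returned in bulk; the queue counters are then advanced by
 * tx_rs_thresh. Returns the number of descriptors freed, or 0 if the
 * threshold descriptor is not yet done.
 */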
static __rte_always_inline int
i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
	struct i40e_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh-1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
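
/* Record the transmitted mbufs in the S/W ring so they can be freed later. */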
static __rte_always_inline void
tx_backlog_entry(struct i40e_tx_entry *txep,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}
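
/*
 * Release all mbufs still held by an RX queue's S/W ring. Once rearming has
 * started, only the window between rx_tail and rxrearm_start holds valid
 * mbufs, so only that range is walked; afterwards every entry is cleared.
 */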
static inline void
_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	const unsigned mask = rxq->nb_rx_desc - 1;
	unsigned i;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf != NULL)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}
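
/*
 * Build the 64-bit mbuf_initializer used by the vector RX rearm code: a
 * template of the rearm_data fields (data_off, refcnt, nb_segs, port)
 * captured from a zeroed stack mbuf.
 */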
static inline int
i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
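
/*
 * Check whether the device configuration allows the vector RX path:
 * flow director, header split and QinQ (VLAN extend) offloads are not
 * supported, and the path is unavailable when IEEE1588 support is
 * compiled in.
 */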
static inline int
i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;

	/* no fdir support */
	if (fconf->mode != RTE_FDIR_MODE_NONE)
		return -1;

	/* - no csum error report support
	 * - no header split support
	 */
	if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
		return -1;

	/* no QinQ support */
	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
		return -1;

	return 0;
#else
	RTE_SET_USED(dev);
	return -1;
#endif
}

#endif /* _I40E_RXTX_VEC_COMMON_H_ */