/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_
10 static inline uint16_t
11 ice_rx_reassemble_packets(struct ice_rx_queue
*rxq
, struct rte_mbuf
**rx_bufs
,
12 uint16_t nb_bufs
, uint8_t *split_flags
)
14 struct rte_mbuf
*pkts
[ICE_VPMD_RX_BURST
] = {0}; /*finished pkts*/
15 struct rte_mbuf
*start
= rxq
->pkt_first_seg
;
16 struct rte_mbuf
*end
= rxq
->pkt_last_seg
;
17 unsigned int pkt_idx
, buf_idx
;
19 for (buf_idx
= 0, pkt_idx
= 0; buf_idx
< nb_bufs
; buf_idx
++) {
21 /* processing a split packet */
22 end
->next
= rx_bufs
[buf_idx
];
23 rx_bufs
[buf_idx
]->data_len
+= rxq
->crc_len
;
26 start
->pkt_len
+= rx_bufs
[buf_idx
]->data_len
;
29 if (!split_flags
[buf_idx
]) {
30 /* it's the last packet of the set */
31 start
->hash
= end
->hash
;
32 start
->ol_flags
= end
->ol_flags
;
33 /* we need to strip crc for the whole packet */
34 start
->pkt_len
-= rxq
->crc_len
;
35 if (end
->data_len
> rxq
->crc_len
) {
36 end
->data_len
-= rxq
->crc_len
;
38 /* free up last mbuf */
39 struct rte_mbuf
*secondlast
= start
;
42 while (secondlast
->next
!= end
)
43 secondlast
= secondlast
->next
;
44 secondlast
->data_len
-= (rxq
->crc_len
-
46 secondlast
->next
= NULL
;
47 rte_pktmbuf_free_seg(end
);
49 pkts
[pkt_idx
++] = start
;
54 /* not processing a split packet */
55 if (!split_flags
[buf_idx
]) {
56 /* not a split packet, save and skip */
57 pkts
[pkt_idx
++] = rx_bufs
[buf_idx
];
60 start
= rx_bufs
[buf_idx
];
62 rx_bufs
[buf_idx
]->data_len
+= rxq
->crc_len
;
63 rx_bufs
[buf_idx
]->pkt_len
+= rxq
->crc_len
;
67 /* save the partial packet for next time */
68 rxq
->pkt_first_seg
= start
;
69 rxq
->pkt_last_seg
= end
;
70 rte_memcpy(rx_bufs
, pkts
, pkt_idx
* (sizeof(*pkts
)));
74 static __rte_always_inline
int
75 ice_tx_free_bufs(struct ice_tx_queue
*txq
)
77 struct ice_tx_entry
*txep
;
81 struct rte_mbuf
*m
, *free
[ICE_TX_MAX_FREE_BUF_SZ
];
83 /* check DD bits on threshold descriptor */
84 if ((txq
->tx_ring
[txq
->tx_next_dd
].cmd_type_offset_bsz
&
85 rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M
)) !=
86 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE
))
89 n
= txq
->tx_rs_thresh
;
91 /* first buffer to free from S/W ring is at index
92 * tx_next_dd - (tx_rs_thresh-1)
94 txep
= &txq
->sw_ring
[txq
->tx_next_dd
- (n
- 1)];
95 m
= rte_pktmbuf_prefree_seg(txep
[0].mbuf
);
99 for (i
= 1; i
< n
; i
++) {
100 m
= rte_pktmbuf_prefree_seg(txep
[i
].mbuf
);
102 if (likely(m
->pool
== free
[0]->pool
)) {
105 rte_mempool_put_bulk(free
[0]->pool
,
113 rte_mempool_put_bulk(free
[0]->pool
, (void **)free
, nb_free
);
115 for (i
= 1; i
< n
; i
++) {
116 m
= rte_pktmbuf_prefree_seg(txep
[i
].mbuf
);
118 rte_mempool_put(m
->pool
, m
);
122 /* buffers were freed, update counters */
123 txq
->nb_tx_free
= (uint16_t)(txq
->nb_tx_free
+ txq
->tx_rs_thresh
);
124 txq
->tx_next_dd
= (uint16_t)(txq
->tx_next_dd
+ txq
->tx_rs_thresh
);
125 if (txq
->tx_next_dd
>= txq
->nb_tx_desc
)
126 txq
->tx_next_dd
= (uint16_t)(txq
->tx_rs_thresh
- 1);
128 return txq
->tx_rs_thresh
;
131 static __rte_always_inline
void
132 ice_tx_backlog_entry(struct ice_tx_entry
*txep
,
133 struct rte_mbuf
**tx_pkts
, uint16_t nb_pkts
)
137 for (i
= 0; i
< (int)nb_pkts
; ++i
)
138 txep
[i
].mbuf
= tx_pkts
[i
];
142 _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue
*rxq
)
144 const unsigned int mask
= rxq
->nb_rx_desc
- 1;
147 if (unlikely(!rxq
->sw_ring
)) {
148 PMD_DRV_LOG(DEBUG
, "sw_ring is NULL");
152 if (rxq
->rxrearm_nb
>= rxq
->nb_rx_desc
)
155 /* free all mbufs that are valid in the ring */
156 if (rxq
->rxrearm_nb
== 0) {
157 for (i
= 0; i
< rxq
->nb_rx_desc
; i
++) {
158 if (rxq
->sw_ring
[i
].mbuf
)
159 rte_pktmbuf_free_seg(rxq
->sw_ring
[i
].mbuf
);
162 for (i
= rxq
->rx_tail
;
163 i
!= rxq
->rxrearm_start
;
164 i
= (i
+ 1) & mask
) {
165 if (rxq
->sw_ring
[i
].mbuf
)
166 rte_pktmbuf_free_seg(rxq
->sw_ring
[i
].mbuf
);
170 rxq
->rxrearm_nb
= rxq
->nb_rx_desc
;
172 /* set all entries to NULL */
173 memset(rxq
->sw_ring
, 0, sizeof(rxq
->sw_ring
[0]) * rxq
->nb_rx_desc
);
177 _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue
*txq
)
181 if (unlikely(!txq
|| !txq
->sw_ring
)) {
182 PMD_DRV_LOG(DEBUG
, "Pointer to rxq or sw_ring is NULL");
187 * vPMD tx will not set sw_ring's mbuf to NULL after free,
188 * so need to free remains more carefully.
190 i
= txq
->tx_next_dd
- txq
->tx_rs_thresh
+ 1;
191 if (txq
->tx_tail
< i
) {
192 for (; i
< txq
->nb_tx_desc
; i
++) {
193 rte_pktmbuf_free_seg(txq
->sw_ring
[i
].mbuf
);
194 txq
->sw_ring
[i
].mbuf
= NULL
;
198 for (; i
< txq
->tx_tail
; i
++) {
199 rte_pktmbuf_free_seg(txq
->sw_ring
[i
].mbuf
);
200 txq
->sw_ring
[i
].mbuf
= NULL
;
205 ice_rxq_vec_setup_default(struct ice_rx_queue
*rxq
)
208 struct rte_mbuf mb_def
= { .buf_addr
= 0 }; /* zeroed mbuf */
211 mb_def
.data_off
= RTE_PKTMBUF_HEADROOM
;
212 mb_def
.port
= rxq
->port_id
;
213 rte_mbuf_refcnt_set(&mb_def
, 1);
215 /* prevent compiler reordering: rearm_data covers previous fields */
216 rte_compiler_barrier();
217 p
= (uintptr_t)&mb_def
.rearm_data
;
218 rxq
->mbuf_initializer
= *(uint64_t *)p
;
223 ice_rx_vec_queue_default(struct ice_rx_queue
*rxq
)
228 if (!rte_is_power_of_2(rxq
->nb_rx_desc
))
231 if (rxq
->rx_free_thresh
< ICE_VPMD_RX_BURST
)
234 if (rxq
->nb_rx_desc
% rxq
->rx_free_thresh
)
/* Tx offloads the vector Tx path cannot handle; a queue with any of
 * these enabled must use the scalar Tx path instead.
 */
#define ICE_NO_VECTOR_FLAGS (			 \
		DEV_TX_OFFLOAD_MULTI_SEGS |	 \
		DEV_TX_OFFLOAD_VLAN_INSERT |	 \
		DEV_TX_OFFLOAD_SCTP_CKSUM |	 \
		DEV_TX_OFFLOAD_UDP_CKSUM |	 \
		DEV_TX_OFFLOAD_TCP_CKSUM)
248 ice_tx_vec_queue_default(struct ice_tx_queue
*txq
)
253 if (txq
->offloads
& ICE_NO_VECTOR_FLAGS
)
256 if (txq
->tx_rs_thresh
< ICE_VPMD_TX_BURST
||
257 txq
->tx_rs_thresh
> ICE_TX_MAX_FREE_BUF_SZ
)
264 ice_rx_vec_dev_check_default(struct rte_eth_dev
*dev
)
267 struct ice_rx_queue
*rxq
;
269 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
270 rxq
= dev
->data
->rx_queues
[i
];
271 if (ice_rx_vec_queue_default(rxq
))
279 ice_tx_vec_dev_check_default(struct rte_eth_dev
*dev
)
282 struct ice_tx_queue
*txq
;
284 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
285 txq
= dev
->data
->tx_queues
[i
];
286 if (ice_tx_vec_queue_default(txq
))