/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "ice_rxtx.h"

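/* Reassemble scattered (multi-segment) packets left behind by the vector Rx
 * path. Walks nb_bufs mbufs, chaining segments flagged in split_flags into
 * complete packets, stripping the CRC from the tail segment, and compacting
 * the finished packets back into rx_bufs. Returns the number of whole
 * packets; a trailing partial packet is parked in rxq->pkt_first_seg /
 * rxq->pkt_last_seg until the next burst.
 */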
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

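/* Free a tx_rs_thresh-sized batch of transmitted mbufs once the hardware has
 * marked the threshold descriptor done. Mbufs from the same mempool are
 * returned in bulk; a mix of pools triggers a flush per pool. If the first
 * mbuf is still referenced elsewhere, the remainder are returned one by one.
 * Returns the number of slots freed, or 0 if the descriptor is not done yet.
 */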
static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
	struct ice_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void **)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

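/* Record the mbufs of a Tx burst in the software ring so they can be freed
 * by ice_tx_free_bufs() once transmission completes.
 */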
static __rte_always_inline void
ice_tx_backlog_entry(struct ice_tx_entry *txep,
		     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;

	for (i = 0; i < (int)nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}

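/* Release every mbuf still owned by a vector Rx queue. When the ring has
 * been partially rearmed, only the window between rx_tail and rxrearm_start
 * holds valid mbufs; when nothing has been rearmed yet, the whole ring is
 * scanned.
 */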
static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

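/* Release every mbuf still owned by a vector Tx queue. */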
static inline void
_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
{
	uint16_t i;

	if (unlikely(!txq || !txq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
		return;
	}

	/**
	 * vPMD tx will not set sw_ring's mbuf to NULL after free,
	 * so we need to free the remaining mbufs more carefully.
	 */
	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
	if (txq->tx_tail < i) {
		for (; i < txq->nb_tx_desc; i++) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
		}
		i = 0;
	}
	for (; i < txq->tx_tail; i++) {
		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
		txq->sw_ring[i].mbuf = NULL;
	}
}

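/* Build the 64-bit mbuf initializer used by the vector Rx path: a template
 * mbuf is populated with the queue's defaults and its rearm_data word
 * (which overlays data_off, refcnt, nb_segs and port) is cached in the
 * queue for fast descriptor rearming.
 */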
static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

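/* Check whether an Rx queue is configured in a way the vector path can
 * handle: a power-of-two descriptor count and a free threshold that is
 * large enough and divides the ring size evenly. Returns 0 on success,
 * -1 otherwise.
 */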
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	return 0;
}

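/* Tx offloads not supported by the vector path; a queue with any of these
 * enabled is rejected by ice_tx_vec_queue_default() below.
 */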
#define ICE_NO_VECTOR_FLAGS (		 \
		DEV_TX_OFFLOAD_MULTI_SEGS |  \
		DEV_TX_OFFLOAD_VLAN_INSERT | \
		DEV_TX_OFFLOAD_SCTP_CKSUM |  \
		DEV_TX_OFFLOAD_UDP_CKSUM |   \
		DEV_TX_OFFLOAD_TCP_CKSUM)

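/* Check whether a Tx queue qualifies for the vector path: no unsupported
 * offloads and an RS threshold within the burst/free-batch bounds. Returns
 * 0 on success, -1 otherwise.
 */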
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
		return -1;

	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;

	return 0;
}

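/* Return 0 only if every Rx queue of the device qualifies for the vector
 * path, -1 otherwise.
 */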
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_rx_queue *rxq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (ice_rx_vec_queue_default(rxq))
			return -1;
	}

	return 0;
}

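/* Return 0 only if every Tx queue of the device qualifies for the vector
 * path, -1 otherwise.
 */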
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_tx_queue *txq;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (ice_tx_vec_queue_default(txq))
			return -1;
	}

	return 0;
}

#endif /* _ICE_RXTX_VEC_COMMON_H_ */