]>
Commit | Line | Data |
---|---|---|
9f95a23c TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2015 Intel Corporation | |
7c673cae FG |
3 | */ |
4 | ||
5 | #ifndef _IXGBE_RXTX_VEC_COMMON_H_ | |
6 | #define _IXGBE_RXTX_VEC_COMMON_H_ | |
7 | #include <stdint.h> | |
9f95a23c | 8 | #include <rte_ethdev_driver.h> |
7c673cae FG |
9 | |
10 | #include "ixgbe_ethdev.h" | |
11 | #include "ixgbe_rxtx.h" | |
12 | ||
/*
 * Re-chain mbufs of scattered (multi-segment) packets produced by the
 * vector RX path into complete packets.
 *
 * split_flags[i] is non-zero while buffer i is a middle segment of a
 * packet and zero on the last segment.  A packet still in progress at
 * the end of the burst is parked in rxq->pkt_first_seg/pkt_last_seg and
 * finished on the next call.
 *
 * Returns the number of completed packets, compacted in place into
 * rx_bufs[0..ret-1].
 */
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[nb_bufs]; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end != NULL) {
			/* processing a split packet: append this segment */
			end->next = rx_bufs[buf_idx];
			/* per-segment data_len excludes CRC; add it back so
			 * the total can be stripped once at the end */
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				/* hash/flags are reported on the last
				 * descriptor; copy them to the head */
				start->hash = end->hash;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len)
					end->data_len -= rxq->crc_len;
				else {
					/* CRC spans into the last mbuf:
					 * free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					/* remove the CRC bytes left over in
					 * the second-to-last segment */
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			/* first segment of a new split packet */
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
74 | ||
9f95a23c | 75 | static __rte_always_inline int |
7c673cae FG |
76 | ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) |
77 | { | |
78 | struct ixgbe_tx_entry_v *txep; | |
79 | uint32_t status; | |
80 | uint32_t n; | |
81 | uint32_t i; | |
82 | int nb_free = 0; | |
83 | struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; | |
84 | ||
85 | /* check DD bit on threshold descriptor */ | |
86 | status = txq->tx_ring[txq->tx_next_dd].wb.status; | |
87 | if (!(status & IXGBE_ADVTXD_STAT_DD)) | |
88 | return 0; | |
89 | ||
90 | n = txq->tx_rs_thresh; | |
91 | ||
92 | /* | |
93 | * first buffer to free from S/W ring is at index | |
94 | * tx_next_dd - (tx_rs_thresh-1) | |
95 | */ | |
96 | txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)]; | |
11fdf7f2 | 97 | m = rte_pktmbuf_prefree_seg(txep[0].mbuf); |
7c673cae FG |
98 | if (likely(m != NULL)) { |
99 | free[0] = m; | |
100 | nb_free = 1; | |
101 | for (i = 1; i < n; i++) { | |
11fdf7f2 | 102 | m = rte_pktmbuf_prefree_seg(txep[i].mbuf); |
7c673cae FG |
103 | if (likely(m != NULL)) { |
104 | if (likely(m->pool == free[0]->pool)) | |
105 | free[nb_free++] = m; | |
106 | else { | |
107 | rte_mempool_put_bulk(free[0]->pool, | |
108 | (void *)free, nb_free); | |
109 | free[0] = m; | |
110 | nb_free = 1; | |
111 | } | |
112 | } | |
113 | } | |
114 | rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); | |
115 | } else { | |
116 | for (i = 1; i < n; i++) { | |
11fdf7f2 | 117 | m = rte_pktmbuf_prefree_seg(txep[i].mbuf); |
7c673cae FG |
118 | if (m != NULL) |
119 | rte_mempool_put(m->pool, m); | |
120 | } | |
121 | } | |
122 | ||
123 | /* buffers were freed, update counters */ | |
124 | txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); | |
125 | txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); | |
126 | if (txq->tx_next_dd >= txq->nb_tx_desc) | |
127 | txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); | |
128 | ||
129 | return txq->tx_rs_thresh; | |
130 | } | |
131 | ||
9f95a23c | 132 | static __rte_always_inline void |
7c673cae FG |
133 | tx_backlog_entry(struct ixgbe_tx_entry_v *txep, |
134 | struct rte_mbuf **tx_pkts, uint16_t nb_pkts) | |
135 | { | |
136 | int i; | |
137 | ||
138 | for (i = 0; i < (int)nb_pkts; ++i) | |
139 | txep[i].mbuf = tx_pkts[i]; | |
140 | } | |
141 | ||
142 | static inline void | |
143 | _ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) | |
144 | { | |
145 | unsigned int i; | |
146 | struct ixgbe_tx_entry_v *txe; | |
147 | const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); | |
148 | ||
149 | if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc) | |
150 | return; | |
151 | ||
152 | /* release the used mbufs in sw_ring */ | |
153 | for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1); | |
154 | i != txq->tx_tail; | |
155 | i = (i + 1) & max_desc) { | |
156 | txe = &txq->sw_ring_v[i]; | |
157 | rte_pktmbuf_free_seg(txe->mbuf); | |
158 | } | |
159 | txq->nb_tx_free = max_desc; | |
160 | ||
161 | /* reset tx_entry */ | |
162 | for (i = 0; i < txq->nb_tx_desc; i++) { | |
163 | txe = &txq->sw_ring_v[i]; | |
164 | txe->mbuf = NULL; | |
165 | } | |
166 | } | |
167 | ||
168 | static inline void | |
169 | _ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) | |
170 | { | |
171 | const unsigned int mask = rxq->nb_rx_desc - 1; | |
172 | unsigned int i; | |
173 | ||
174 | if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) | |
175 | return; | |
176 | ||
177 | /* free all mbufs that are valid in the ring */ | |
178 | if (rxq->rxrearm_nb == 0) { | |
179 | for (i = 0; i < rxq->nb_rx_desc; i++) { | |
180 | if (rxq->sw_ring[i].mbuf != NULL) | |
181 | rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); | |
182 | } | |
183 | } else { | |
184 | for (i = rxq->rx_tail; | |
185 | i != rxq->rxrearm_start; | |
186 | i = (i + 1) & mask) { | |
187 | if (rxq->sw_ring[i].mbuf != NULL) | |
188 | rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); | |
189 | } | |
190 | } | |
191 | ||
192 | rxq->rxrearm_nb = rxq->nb_rx_desc; | |
193 | ||
194 | /* set all entries to NULL */ | |
195 | memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); | |
196 | } | |
197 | ||
198 | static inline void | |
199 | _ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq) | |
200 | { | |
201 | if (txq == NULL) | |
202 | return; | |
203 | ||
204 | if (txq->sw_ring != NULL) { | |
205 | rte_free(txq->sw_ring_v - 1); | |
206 | txq->sw_ring_v = NULL; | |
207 | } | |
208 | } | |
209 | ||
210 | static inline void | |
211 | _ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq) | |
212 | { | |
213 | static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } }; | |
214 | struct ixgbe_tx_entry_v *txe = txq->sw_ring_v; | |
215 | uint16_t i; | |
216 | ||
217 | /* Zero out HW ring memory */ | |
218 | for (i = 0; i < txq->nb_tx_desc; i++) | |
219 | txq->tx_ring[i] = zeroed_desc; | |
220 | ||
221 | /* Initialize SW ring entries */ | |
222 | for (i = 0; i < txq->nb_tx_desc; i++) { | |
223 | volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; | |
224 | ||
225 | txd->wb.status = IXGBE_TXD_STAT_DD; | |
226 | txe[i].mbuf = NULL; | |
227 | } | |
228 | ||
229 | txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); | |
230 | txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); | |
231 | ||
232 | txq->tx_tail = 0; | |
233 | txq->nb_tx_used = 0; | |
234 | /* | |
235 | * Always allow 1 descriptor to be un-allocated to avoid | |
236 | * a H/W race condition | |
237 | */ | |
238 | txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); | |
239 | txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); | |
240 | txq->ctx_curr = 0; | |
241 | memset((void *)&txq->ctx_cache, 0, | |
242 | IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); | |
243 | } | |
244 | ||
245 | static inline int | |
246 | ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq) | |
247 | { | |
248 | uintptr_t p; | |
249 | struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ | |
250 | ||
251 | mb_def.nb_segs = 1; | |
252 | mb_def.data_off = RTE_PKTMBUF_HEADROOM; | |
253 | mb_def.port = rxq->port_id; | |
254 | rte_mbuf_refcnt_set(&mb_def, 1); | |
255 | ||
256 | /* prevent compiler reordering: rearm_data covers previous fields */ | |
257 | rte_compiler_barrier(); | |
258 | p = (uintptr_t)&mb_def.rearm_data; | |
259 | rxq->mbuf_initializer = *(uint64_t *)p; | |
260 | return 0; | |
261 | } | |
262 | ||
263 | static inline int | |
264 | ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq, | |
265 | const struct ixgbe_txq_ops *txq_ops) | |
266 | { | |
267 | if (txq->sw_ring_v == NULL) | |
268 | return -1; | |
269 | ||
270 | /* leave the first one for overflow */ | |
271 | txq->sw_ring_v = txq->sw_ring_v + 1; | |
272 | txq->ops = txq_ops; | |
273 | ||
274 | return 0; | |
275 | } | |
276 | ||
277 | static inline int | |
278 | ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) | |
279 | { | |
280 | #ifndef RTE_LIBRTE_IEEE1588 | |
7c673cae FG |
281 | struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; |
282 | ||
7c673cae FG |
283 | /* no fdir support */ |
284 | if (fconf->mode != RTE_FDIR_MODE_NONE) | |
285 | return -1; | |
286 | ||
7c673cae FG |
287 | return 0; |
288 | #else | |
289 | RTE_SET_USED(dev); | |
290 | return -1; | |
291 | #endif | |
292 | } | |
293 | #endif |