/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <arm_neon.h>

#pragma GCC diagnostic ignored "-Wcast-qual"
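
/*
 * Note: the NEON store intrinsics take non-volatile pointers, so writes to
 * the (volatile) descriptor rings below cast the qualifier away; the pragma
 * above keeps those deliberate casts from triggering -Wcast-qual.
 */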

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;
	uint8x8_t p;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
					  (void *)rxep,
					  RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}
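
	/*
	 * mbuf_initializer is an 8-byte template built at queue setup
	 * (rearm_data plus the first bytes of ol_flags); loading it once
	 * lets the loop below reset each mbuf header with a single store.
	 */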
	p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/*
		 * Flush mbuf with the packet template.
		 * The data to be rearmed is 6 bytes long, but the RX path
		 * overwrites ol_flags, which comes next in the mbuf, anyway.
		 * So overwrite the whole 8 bytes with one store:
		 * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
		 */
		vst1_u8((uint8_t *)&mb0->rearm_data, p);
		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
		/* flush descriptor with the buffer's physical address */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		vst1_u8((uint8_t *)&mb1->rearm_data, p);
		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets.  Therefore we provide a flag to disable
 * the processing of the olflags field when it is not needed.  This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
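/*
 * Presumably toggled through the DPDK build configuration
 * (CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE in config/common_base); when unset,
 * desc_to_olflags_v compiles away to nothing via the stub below.
 */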
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

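/*
 * Bit 3 of each status byte is the VLAN-present flag (IXGBE_RXD_STAT_VP);
 * shifting right by VTAG_SHIFT moves it onto bit 0, where PKT_RX_VLAN_PKT
 * lives, before masking.
 */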
#define VTAG_SHIFT (3)

static inline void
desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
		  uint8x16_t staterr, struct rte_mbuf **rx_pkts)
{
	uint8x16_t ptype;
	uint8x16_t vtag;

	union {
		uint8_t e[4];
		uint32_t word;
	} vol;

	const uint8x16_t pkttype_msk = {
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rsstype_msk = {
			0x0F, 0x0F, 0x0F, 0x0F,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rss_flags = {
			0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
			0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, 0, 0,
			0, 0, 0, PKT_RX_FDIR};
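
	/*
	 * vqtbl1q_u8 below uses each masked RSS-type nibble as an index into
	 * the 16-entry rss_flags table, translating the hardware RSS/FDIR
	 * type straight into per-packet ol_flags bytes.
	 */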
	/* pick the RSS-type byte of each descriptor into lanes 0-3 */
	ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
	ptype = vandq_u8(ptype, rsstype_msk);
	ptype = vqtbl1q_u8(rss_flags, ptype);

	/* extract the VLAN-present bit and fold it in */
	vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
	vtag = vandq_u8(vtag, pkttype_msk);
	vtag = vorrq_u8(ptype, vtag);

	vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
#else
#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
#endif

/*
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only the first RTE_IXGBE_MAX_RX_BURST
 *   descriptors are scanned for DD bits
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 * - does not support ol_flags for RSS or checksum errors
 */

#define IXGBE_VPMD_DESC_DD_MASK		0x01010101
#define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
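
/*
 * After the zip shuffles in the receive loop, the low 32 bits of the status
 * vector hold one status byte per descriptor; bit 0 of each byte is DD and
 * bit 1 is EOP, hence the replicated masks above.
 */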

static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,
		0xFF, 0xFF,	/* skip 32 bits pkt_type */
		12, 13,		/* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,	/* skip high 16 bits pkt_len, zero out */
		12, 13,		/* octet 12~13, 16 bits data_len */
		14, 15,		/* octet 14~15, low 16 bits vlan_macip */
		4, 5, 6, 7	/* octet 4~7, 32 bits rss */
		};
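
	/*
	 * shuf_msk above drives a vqtbl1q_u8 gather: it pulls pkt_len,
	 * data_len, the VLAN tag and the RSS hash out of the write-back
	 * descriptor in the exact layout of mbuf->rx_descriptor_fields1;
	 * 0xFF lanes come out as zero.
	 */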
	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
				 rxq->crc_len, 0, 0, 0};
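
	/*
	 * crc_adjust subtracts crc_len from the pkt_len and data_len lanes;
	 * crc_len is non-zero only when the NIC is configured to leave the
	 * CRC on the packet.
	 */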

	/* nb_pkts must be no greater than RTE_IXGBE_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
	      rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packets' descriptors in one loop
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill mbuf fields from the descriptors
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint8x16x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint8x16_t staterr;
		uint16x8_t tmp;
		uint32_t stat;

		/* B.1 load 2 mbuf pointers */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load desc[3] */
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
		rte_rmb();
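
		/*
		 * The NIC writes descriptors back in ascending order, so by
		 * reading descriptor 3 first (with rte_rmb() ordering the
		 * remaining loads after it), a DD bit observed at position i
		 * implies positions 0..i-1 are complete too, keeping the DD
		 * pattern contiguous for the popcount below.
		 */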

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

		/* B.1 load 2 more mbuf pointers */
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		/* A.1 load the remaining descs */
		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[0] = vld1q_u64((uint64_t *)(rxdp));

		/* B.2 copy 2 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
				      vreinterpretq_u8_u64(descs[3]));
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
				      vreinterpretq_u8_u64(descs[2]));

		/* C.2 get 4 pkts' staterr value: after the second zip, the
		 * low 32 bits hold one status byte per descriptor
		 */
		staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
		stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
				  &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final 3,4 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* C* extract and record EOP bit */
		if (split_packet) {
			/* and with mask to extract bits, flipping 1-0 */
			*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;

			split_packet += RTE_IXGBE_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

		rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);

		/* D.3 copy final 1,2 data to rx_pkts */
		vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);

		/* C.4 calc available number of desc */
		var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer; nb_rx_desc is a power of two in
	 * vector mode, so the AND wraps it
	 */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}
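
/*
 * When split_packet is non-NULL, the raw routine above stores one byte per
 * descriptor with the EOP bit inverted (non-zero means "more segments
 * follow"); ixgbe_recv_scattered_pkts_vec below relies on that to find
 * segment boundaries.
 */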

/*
 * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only the first RTE_IXGBE_MAX_RX_BURST
 *   descriptors are scanned for DD bits
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 * - does not support ol_flags for RSS or checksum errors
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		    uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - does not support ol_flags for RSS or checksum errors
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only the first RTE_IXGBE_MAX_RX_BURST
 *   descriptors are scanned for DD bits
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
					      split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case: full burst and no packets to be joined; test the
	 * split flags eight bytes at a time
	 */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}

static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
     struct rte_mbuf *pkt, uint64_t flags)
{
	uint64x2_t descriptor = {
			pkt->buf_physaddr + pkt->data_off,
			(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

	vst1q_u64((uint64_t *)&txdp->read, descriptor);
}
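
/*
 * The second 64-bit word of the advanced TX descriptor packs three fields:
 * data_len in the low 16 bits of cmd_type_len, the DTYP/DCMD flags above
 * it, and PAYLEN in the olinfo_status dword (bit 14 of the upper 32 bits,
 * hence the shift by 46 in the combined 64-bit value).
 */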

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		/* the last descriptor before the wrap requests write-back */
		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* wrap to avoid running past the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
					     txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* no csum error report support */
	if (rxmode->hw_ip_checksum == 1)
		return -1;

	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}