/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <arm_neon.h>

#pragma GCC diagnostic ignored "-Wcast-qual"
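
/*
 * Note: the NEON load/store calls below pass pointers into the descriptor
 * rings (declared volatile) to the intrinsics as plain uint64_t/uint8_t
 * pointers; the pragma above silences the resulting -Wcast-qual warnings
 * instead of scattering explicit casts through the hot path.
 */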

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;
	uint8x8_t p;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
					  (void *)rxep,
					  RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}
	p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/*
		 * Flush mbuf with pkt template.
		 * Data to be rearmed is 6 bytes long.
		 * Though, RX will overwrite ol_flags that are coming next
		 * anyway. So overwrite whole 8 bytes with one load:
		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
		 */
		vst1_u8((uint8_t *)&mb0->rearm_data, p);
		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		vst1_u8((uint8_t *)&mb1->rearm_data, p);
		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}
	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

/* Handling the offload flags (olflags) field takes computation
 * time when receiving packets. Therefore we provide a flag to disable
 * the processing of the olflags field when they are not needed. This
 * gives improved performance, at the cost of losing the offload info
 * in the received packet.
 */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE

#define VTAG_SHIFT     (3)
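
/*
 * The VLAN-present (VP) bit sits at bit 3 of each packet's low status byte,
 * while PKT_RX_VLAN_PKT is bit 0 of ol_flags; shifting the packed status
 * bytes right by VTAG_SHIFT therefore lines the VP bit up with
 * PKT_RX_VLAN_PKT so it can simply be masked in below.
 */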

static inline void
desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
		  uint8x16_t staterr, struct rte_mbuf **rx_pkts)
{
	uint8x16_t ptype;
	uint8x16_t vtag;

	union {
		uint8_t e[4];
		uint32_t word;
	} vol;
	const uint8x16_t pkttype_msk = {
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rsstype_msk = {
			0x0F, 0x0F, 0x0F, 0x0F,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00};

	const uint8x16_t rss_flags = {
			0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
			0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, 0, 0,
			0, 0, 0, PKT_RX_FDIR};
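
	/*
	 * rss_flags serves as a 16-entry lookup table for vqtbl1q_u8():
	 * the low 4 bits of each packet's pkt_info byte (isolated with
	 * rsstype_msk) select the matching ol_flags value, i.e.
	 * PKT_RX_RSS_HASH for the hash-carrying RSS types and PKT_RX_FDIR
	 * for the flow-director entry.
	 */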
	ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
	ptype = vandq_u8(ptype, rsstype_msk);
	ptype = vqtbl1q_u8(rss_flags, ptype);

	vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
	vtag = vandq_u8(vtag, pkttype_msk);
	vtag = vorrq_u8(ptype, vtag);
	vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);

	rx_pkts[0]->ol_flags = vol.e[0];
	rx_pkts[1]->ol_flags = vol.e[1];
	rx_pkts[2]->ol_flags = vol.e[2];
	rx_pkts[3]->ol_flags = vol.e[3];
}
#else
#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
#endif

/*
 * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bit
 * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
 * - don't support ol_flags for rss and csum err
 */

#define IXGBE_VPMD_DESC_DD_MASK		0x01010101
#define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
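
/*
 * After the zip steps in the receive loop, the low 32 bits of the packed
 * status word hold the least-significant status byte of each of the four
 * descriptors. DD is bit 0 and EOP is bit 1 of every such byte, hence the
 * per-byte 0x01/0x02 patterns in the masks above.
 */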

static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,
		0xFF, 0xFF,  /* skip 32 bits pkt_type */
		12, 13,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		12, 13,      /* octet 12~13, 16 bits data_len */
		14, 15,      /* octet 14~15, low 16 bits vlan_macip */
		4, 5, 6, 7   /* octet 4~7, 32bits rss */
		};
	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
				 rxq->crc_len, 0, 0, 0};
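
	/*
	 * shuf_msk is the vqtbl1q_u8() table that rearranges descriptor bytes
	 * into the mbuf rx_descriptor_fields1 layout; the 0xFF entries are
	 * out-of-range indices and therefore produce zero bytes. crc_adjust
	 * then subtracts the CRC length from the pkt_len and data_len lanes
	 * of that shuffled result.
	 */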

	/* nb_pkts has to be less than or equal to RTE_IXGBE_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
	      rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packet in one loop
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_IXGBE_DESCS_PER_LOOP,
			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint8x16x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint8x16_t staterr;
		uint16x8_t tmp;
		uint32_t stat;

		/* B.1 load 1 mbuf point */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
		rte_rmb();

		/* B.2 copy 2 mbuf point into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

		/* B.1 load 1 mbuf point */
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		/* B.1 load 2 mbuf point */
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[0] = vld1q_u64((uint64_t *)(rxdp));

		/* B.2 copy 2 mbuf point into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
				      vreinterpretq_u8_u64(descs[3]));
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
				      vreinterpretq_u8_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
		stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
				  &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final 3,4 data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
			 pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
			 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* C* extract and record EOP bit */
		if (split_packet) {
			/* and with mask to extract bits, flipping 1-0 */
			*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;

			split_packet += RTE_IXGBE_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}
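
		/*
		 * Each byte of ~stat & IXGBE_VPMD_DESC_EOP_MASK is non-zero
		 * exactly when that descriptor's EOP bit is clear, i.e. the
		 * packet continues in the next descriptor; these flags are
		 * consumed later by reassemble_packets().
		 */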

		rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);

		/* D.3 copy final 1,2 data to rx_pkts */
		vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
			 pkt_mb2);
		vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
			 pkt_mb1);

		/* C.4 calc available number of desc */
		var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bit
 * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
 * - don't support ol_flags for rss and csum err
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - don't support ol_flags for rss and csum err
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
 *   numbers of DD bit
 * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;
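
	/*
	 * The four 64-bit words checked above cover all
	 * RTE_IXGBE_MAX_RX_BURST (32) split_flags bytes, so an all-zero
	 * result means no descriptor in this burst had its EOP bit cleared.
	 */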

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64x2_t descriptor = {
			pkt->buf_physaddr + pkt->data_off,
			(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

	vst1q_u64((uint64_t *)&txdp->read, descriptor);
}
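
/*
 * vtx1() fills both quadwords of the advanced TX descriptor read format in
 * one 128-bit store: the buffer DMA address, plus a second word combining
 * the payload length (pkt_len shifted up into the PAYLEN field), the
 * DCMD_DTYP_FLAGS command/type bits and the buffer data_len.
 */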

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	/* cross rx_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}
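
	/*
	 * If the burst wrapped, the first n descriptors above were written up
	 * to the end of the ring (with RS requested on the last one) and the
	 * remaining nb_commit entries now continue from index 0.
	 */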

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* no csum error report support */
	if (rxmode->hw_ip_checksum == 1)
		return -1;

	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
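
/*
 * A negative return from the check above is presumably treated by the RX
 * function selection code as "vector RX not usable", so the driver falls
 * back to a scalar receive path when HW IP checksum offload is requested.
 */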