/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;
	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}
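	/* Note on the failure path above: when the pool cannot supply a full
	 * rearm burst and the ring is nearly drained, the next descriptors
	 * are pointed at the queue's fake_mbuf with a zeroed DMA address, so
	 * the hardware has a harmless target rather than a stale buffer; the
	 * allocation is simply retried on a later rearm call.
	 */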
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
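	/* Note: the unpackhi trick above works because buf_physaddr sits
	 * immediately after buf_addr in the mbuf (checked by the
	 * RTE_BUILD_BUG_ON), so _mm_unpackhi_epi64(v, v) duplicates the
	 * physical address into both 64-bit lanes - one copy for the
	 * descriptor's header address field and one for the packet buffer
	 * address field.
	 */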
#else
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
			i += 4, rxep += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;
		mb2 = rxep[2].mbuf;
		mb3 = rxep[3].mbuf;
		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
		/*
		 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3
		 */
		vaddr0_1 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr0), vaddr1, 1);
		vaddr2_3 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr2), vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}
#endif
	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
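	/* The tail register must point at the last descriptor actually
	 * rearmed, hence rearm_start - 1, wrapping back to nb_rx_desc - 1
	 * when rearm_start has just rolled over to 0.
	 */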
	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* Handles 32B descriptor FDIR ID processing:
 * rxdp: receive descriptor ring, required to load 2nd 16B half of each desc
 * rx_pkts: required to store metadata back to mbufs
 * pkt_idx: offset into the burst, increments in vector widths
 * desc_idx: required to select the correct shift at compile time
 */
static inline __m256i
desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
			 struct rte_mbuf **rx_pkts,
			 const uint32_t pkt_idx,
			 const uint32_t desc_idx)
{
	/* 32B desc path: load rxdp.wb.qword2 for EXT_STATUS and FLEXBH_STAT */
	__m128i *rxdp_desc_0 = (void *)(&rxdp[desc_idx + 0].wb.qword2);
	__m128i *rxdp_desc_1 = (void *)(&rxdp[desc_idx + 1].wb.qword2);
	const __m128i desc_qw2_0 = _mm_load_si128(rxdp_desc_0);
	const __m128i desc_qw2_1 = _mm_load_si128(rxdp_desc_1);

	/* Mask for FLEXBH_STAT, and the FDIR_ID value to compare against. The
	 * remaining data is set to all 1's to pass through data.
	 */
	const __m256i flexbh_mask = _mm256_set_epi32(-1, -1, -1, 3 << 4,
						     -1, -1, -1, 3 << 4);
	const __m256i flexbh_id   = _mm256_set_epi32(-1, -1, -1, 1 << 4,
						     -1, -1, -1, 1 << 4);
	/* Load descriptor, check for FLEXBH bits, generate a mask for both
	 * packets in the register.
	 */
	__m256i desc_qw2_0_1 =
		_mm256_inserti128_si256(_mm256_castsi128_si256(desc_qw2_0),
					desc_qw2_1, 1);
	__m256i desc_tmp_msk = _mm256_and_si256(flexbh_mask, desc_qw2_0_1);
	__m256i fdir_mask = _mm256_cmpeq_epi32(flexbh_id, desc_tmp_msk);
	__m256i fdir_data = _mm256_alignr_epi8(desc_qw2_0_1, desc_qw2_0_1, 12);
	__m256i desc_fdir_data = _mm256_and_si256(fdir_mask, fdir_data);
	/* Write data out to the mbuf. There is no store to this area of the
	 * mbuf today, so we cannot combine it with another store.
	 */
	const uint32_t idx_0 = pkt_idx + desc_idx;
	const uint32_t idx_1 = pkt_idx + desc_idx + 1;
	rx_pkts[idx_0]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 0);
	rx_pkts[idx_1]->hash.fdir.hi = _mm256_extract_epi32(desc_fdir_data, 4);
	/* Create mbuf flags as required for mbuf_flags layout
	 * (That's high lane [1,3,5,7, 0,2,4,6] as u32 lanes).
	 * Approach:
	 * - Mask away bits not required from the fdir_mask
	 * - Leave the PKT_FDIR_ID bit (1 << 13)
	 * - Position that bit correctly based on packet number
	 * - OR in the resulting bit to mbuf_flags
	 */
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
	__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
						  0, 0, 0, 1 << 13);
	__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
	/* For static-inline function, this will be stripped out
	 * as the desc_idx is a hard-coded constant.
	 */
	switch (desc_idx) {
	case 0:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 4);
	case 2:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 8);
	case 4:
		return _mm256_alignr_epi8(desc_flag_bit, desc_flag_bit, 12);
	case 6:
		return desc_flag_bit;
	default:
		break;
	}

	/* NOT REACHED, see above switch returns */
	return _mm256_setzero_si256();
}
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */
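/* Note on the rotates above: mbuf_flags in the main loop uses the (hi->lo)
 * [1, 3, 5, 7, 0, 2, 4, 6] lane order, so each descriptor pair's flag bit
 * is rotated into the lane matching its packet position; because desc_idx
 * is a compile-time constant, each switch arm collapses to one alignr.
 */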
#define PKTLEN_SHIFT 10

/* Force inline as some compilers will not inline by default. */
static __rte_always_inline uint16_t
_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
#define RTE_I40E_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);
	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;
	/* constants used in processing loop */
	const __m256i crc_adjust = _mm256_set_epi16(
			/* first descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0,          /* ignore pkt_type field */
			/* second descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0);         /* ignore pkt_type field */
	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			I40E_RX_DESC_STATUS_EOF_SHIFT);
	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk = _mm256_set_epi8(
			/* first descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			/* second descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF); /* pkt_type set as unknown */
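	/* _mm256_shuffle_epi8 zeroes any output byte whose control byte has
	 * the high bit set, so the 0xFF entries above both skip the source
	 * byte and clear the destination field.
	 */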
	/* compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
	/* Status/Error flag masks */
	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask = _mm256_set1_epi32(
			(1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
	/* data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle
	 */
	const __m256i vlan_flags_shuf = _mm256_set_epi32(
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
	/* data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf = _mm256_set_epi8(
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);
	/* data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask = _mm256_set1_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);
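	/* The checksum flag constants can exceed 8 bits, so the table above
	 * stores them pre-shifted right by 1 to fit each entry in a byte for
	 * the epi8 shuffle; the loop below shifts the shuffled result left
	 * by 1 to restore the real flag values before masking.
	 */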
	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;
	for (i = 0, received = 0; i < nb_pkts;
			i += RTE_I40E_DESCS_PER_LOOP_AVX,
			rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif
		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		do {
			const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
			raw_desc4_5 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
			raw_desc2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
			raw_desc0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
		} while (0);
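		/* Descriptors are read high-to-low with compiler barriers in
		 * between, so the lower-index descriptors are read last; by
		 * the time a lower descriptor is loaded it is at least as up
		 * to date as the higher ones, which keeps the DD-bit count
		 * below free of holes.
		 */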
		if (split_packet) {
			int j;

			for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}
		/* convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
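		/* pkt_len occupies the top bits of the descriptor's length
		 * field, so shifting left by PKTLEN_SHIFT and blending the
		 * high 16-bit word back in (mask 0x80) lands the length on a
		 * 16-bit boundary where the byte shuffle can pick it up.
		 */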
		/* to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
				desc4_5);
		/* convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
				desc0_1);
		/* take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);
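		/* The odd-looking [1, 3, 5, 7, 0, 2, 4, 6] order falls out of
		 * unpackhi/unpacklo operating independently on each 128-bit
		 * lane of the 256-bit registers; the flag extraction and EOP
		 * handling below are written against this same order rather
		 * than paying for a lane-crossing permute here.
		 */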
		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits = _mm256_and_si256(
				status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags = _mm256_shuffle_epi8(
				vlan_flags_shuf, flag_bits);
		const __m256i rss_fdir_bits = _mm256_srli_epi32(flag_bits, 11);
		const __m256i rss_flags = _mm256_shuffle_epi8(rss_flags_shuf,
				rss_fdir_bits);

		/* l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		/* merge flags */
		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));
		/* If the rxq has FDIR enabled, read and process the FDIR info
		 * from the descriptor. This can cause more loads/stores, so is
		 * not always performed. Branch over the code when not enabled.
		 */
		if (rxq->fdir_enabled) {
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			/* 16B descriptor code path:
			 * RSS and FDIR ID use the same offset in the desc, so
			 * only one can be present at a time. The code below
			 * identifies an FDIR ID match, and zeros the RSS value
			 * in the mbuf on FDIR match to keep mbuf data clean.
			 */
#define FDIR_BLEND_MASK ((1 << 3) | (1 << 7))
			/* Flags:
			 * - Take flags, shift bits to null out
			 * - CMPEQ with known FDIR ID, to get 0xFFFF or 0 mask
			 * - Strip bits from mask, leaving 0 or 1 for FDIR ID
			 * - Merge with mbuf_flags
			 */
			/* FLM = 1, FLTSTAT = 0b01, (FLM | FLTSTAT) == 3.
			 * Shift left by 28 to avoid having to mask.
			 */
			const __m256i fdir = _mm256_slli_epi32(rss_fdir_bits, 28);
			const __m256i fdir_id = _mm256_set1_epi32(3 << 28);

			/* As above, the fdir_mask to packet mapping is this:
			 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
			 * Then OR FDIR flags to mbuf_flags on FDIR ID hit.
			 */
			RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
			const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
			const __m256i fdir_mask =
					_mm256_cmpeq_epi32(fdir, fdir_id);
			__m256i fdir_bits =
					_mm256_and_si256(fdir_mask, pkt_fdir_bit);

			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_bits);
			/* Based on FDIR_MASK, clear the RSS or FDIR value.
			 * The FDIR ID value is masked to zero if not a hit,
			 * otherwise the mb0_1 register RSS field is zeroed.
			 */
			const __m256i fdir_zero_mask = _mm256_setzero_si256();
			__m256i tmp0_1 = _mm256_blend_epi32(fdir_zero_mask,
						fdir_mask, FDIR_BLEND_MASK);
			__m256i fdir_mb0_1 = _mm256_and_si256(mb0_1, fdir_mask);

			mb0_1 = _mm256_andnot_si256(tmp0_1, mb0_1);

			/* Write to mbuf: no stores to combine with, so just a
			 * scalar store to push data here.
			 */
			rx_pkts[i + 0]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb0_1, 3);
			rx_pkts[i + 1]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb0_1, 7);
			/* Same as above, only shift the fdir_mask to align
			 * the packet FDIR mask with the FDIR_ID desc lane.
			 */
			__m256i tmp2_3 = _mm256_alignr_epi8(fdir_mask,
					fdir_mask, 12);
			__m256i fdir_mb2_3 = _mm256_and_si256(mb2_3, tmp2_3);
			tmp2_3 = _mm256_blend_epi32(fdir_zero_mask, tmp2_3,
					FDIR_BLEND_MASK);
			mb2_3 = _mm256_andnot_si256(tmp2_3, mb2_3);
			rx_pkts[i + 2]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb2_3, 3);
			rx_pkts[i + 3]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb2_3, 7);

			__m256i tmp4_5 = _mm256_alignr_epi8(fdir_mask,
					fdir_mask, 8);
			__m256i fdir_mb4_5 = _mm256_and_si256(mb4_5, tmp4_5);
			tmp4_5 = _mm256_blend_epi32(fdir_zero_mask, tmp4_5,
					FDIR_BLEND_MASK);
			mb4_5 = _mm256_andnot_si256(tmp4_5, mb4_5);
			rx_pkts[i + 4]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb4_5, 3);
			rx_pkts[i + 5]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb4_5, 7);

			__m256i tmp6_7 = _mm256_alignr_epi8(fdir_mask,
					fdir_mask, 4);
			__m256i fdir_mb6_7 = _mm256_and_si256(mb6_7, tmp6_7);
			tmp6_7 = _mm256_blend_epi32(fdir_zero_mask, tmp6_7,
					FDIR_BLEND_MASK);
			mb6_7 = _mm256_andnot_si256(tmp6_7, mb6_7);
			rx_pkts[i + 6]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb6_7, 3);
			rx_pkts[i + 7]->hash.fdir.hi =
					_mm256_extract_epi32(fdir_mb6_7, 7);
			/* End of 16B descriptor handling */
#else
			/* 32B descriptor FDIR ID mark handling. Returns bits
			 * to be OR-ed into the mbuf olflags.
			 */
			__m256i fdir_add_flags;

			fdir_add_flags =
				desc_fdir_processing_32b(rxdp, rx_pkts, i, 0);
			mbuf_flags =
				_mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags =
				desc_fdir_processing_32b(rxdp, rx_pkts, i, 2);
			mbuf_flags =
				_mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags =
				desc_fdir_processing_32b(rxdp, rx_pkts, i, 4);
			mbuf_flags =
				_mm256_or_si256(mbuf_flags, fdir_add_flags);

			fdir_add_flags =
				desc_fdir_processing_32b(rxdp, rx_pkts, i, 6);
			mbuf_flags =
				_mm256_or_si256(mbuf_flags, fdir_add_flags);
			/* End 32B desc handling */
#endif /* RTE_LIBRTE_I40E_16BYTE_RX_DESC */

		} /* if() on FDIR enabled */
		/* At this point, we have the 8 sets of flags in the low
		 * 16 bits of each 32-bit value of mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the
		 * flags and all the other initialization fields. Extracting
		 * the appropriate flags means that we have to do a shift and
		 * blend for each mbuf before we do the write. However, we can
		 * also add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf.
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf,
						   rearm_data), 16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
				rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 8), 0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 4), 0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(mbuf_flags, 4), 0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				rearm0);
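		/* rearm_data is 16-byte aligned and followed by ol_flags, so
		 * each blend above drops the packet's 32-bit flags into the
		 * third dword (mask 0x04) of mbuf_init, and the permute pulls
		 * in the descriptor-derived fields, giving one full 32-byte
		 * store per mbuf.
		 */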
		/* repeat for the odd mbufs */
		const __m256i odd_flags = _mm256_castsi128_si256(
				_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 8), 0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 4), 0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(odd_flags, 4), 0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				rearm1);
		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask = _mm_set1_epi16(
					1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits = _mm_packus_epi32(
					_mm256_castsi256_si128(eop_bits256),
					_mm256_extractf128_si256(eop_bits256,
								 1));
			/* flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/* eop bits are out of order, so we need to shuffle
			 * them back into order again. In doing so, only use
			 * low 8 bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
					0xFF, 0xFF, 0xFF, 0xFF,
					8, 0, 10, 2, /* move values to lo 64b */
					12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
					_mm_cvtsi128_si64(split_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
		}
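		/* The inverted EOP bits recorded here are per-packet "split"
		 * flags: a set flag means the descriptor did not end a
		 * packet, and the scattered-receive path below uses them to
		 * stitch multi-descriptor packets back together.
		 */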
		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
				_mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_extracti128_si256(status0_7, 1)));
		burst += __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
			break;
	}
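	/* Each 32-bit status word contributes one DD bit after the pack, so
	 * the popcounts above count completed descriptors; a burst smaller
	 * than RTE_I40E_DESCS_PER_LOOP_AVX means the ring ran out of done
	 * descriptors and the loop stops early.
	 */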
	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}
/* Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}
/* vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}
/* vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 */
uint16_t
i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}
	return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len <<
					I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
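/* Note: the single 128-bit store above writes a whole data descriptor:
 * the low quadword carries the buffer DMA address, the high quadword
 * packs the descriptor type, command flags and buffer size.
 */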
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}
835 for (; nb_pkts
> 3; txdp
+= 4, pkt
+= 4, nb_pkts
-= 4) {
836 uint64_t hi_qw3
= hi_qw_tmpl
|
837 ((uint64_t)pkt
[3]->data_len
<< I40E_TXD_QW1_TX_BUF_SZ_SHIFT
);
838 uint64_t hi_qw2
= hi_qw_tmpl
|
839 ((uint64_t)pkt
[2]->data_len
<< I40E_TXD_QW1_TX_BUF_SZ_SHIFT
);
840 uint64_t hi_qw1
= hi_qw_tmpl
|
841 ((uint64_t)pkt
[1]->data_len
<< I40E_TXD_QW1_TX_BUF_SZ_SHIFT
);
842 uint64_t hi_qw0
= hi_qw_tmpl
|
843 ((uint64_t)pkt
[0]->data_len
<< I40E_TXD_QW1_TX_BUF_SZ_SHIFT
);
845 __m256i desc2_3
= _mm256_set_epi64x(
846 hi_qw3
, pkt
[3]->buf_physaddr
+ pkt
[3]->data_off
,
847 hi_qw2
, pkt
[2]->buf_physaddr
+ pkt
[2]->data_off
);
848 __m256i desc0_1
= _mm256_set_epi64x(
849 hi_qw1
, pkt
[1]->buf_physaddr
+ pkt
[1]->data_off
,
850 hi_qw0
, pkt
[0]->buf_physaddr
+ pkt
[0]->data_off
);
851 _mm256_store_si256((void *)(txdp
+ 2), desc2_3
);
852 _mm256_store_si256((void *)txdp
, desc0_1
);
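	/* The aligned 256-bit stores above require txdp to sit on a 32-byte
	 * boundary, which the single-descriptor alignment step at the top of
	 * this function guarantees before the unrolled loop is entered.
	 */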
	/* do any last ones */
	while (nb_pkts) {
		vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
static inline uint16_t
i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;
	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}
	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
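/* Note on the function above: setting the RS bit only on every
 * tx_rs_thresh-th descriptor, rather than on each one, limits how often
 * the hardware reports descriptor writeback, keeping PCIe overhead down
 * on the transmit path.
 */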
uint16_t
i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],