/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
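
/* Rearm the RX descriptor ring: take RTE_I40E_RXQ_REARM_THRESH fresh mbufs
 * from the queue's mempool, write their DMA addresses (buffer address plus
 * headroom) into the descriptors starting at rxrearm_start, then hand the
 * descriptors back to the NIC by advancing the queue's tail register.
 */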
static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			/* ring nearly empty and no mbufs available: point
			 * the next few entries at the queue's fake_mbuf and
			 * zero their descriptors rather than leave stale
			 * addresses in place
			 */
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}
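
	/* Two rearm paths follow. With the default 32B descriptors only the
	 * 16B "read" half of each descriptor is rewritten, so two mbufs are
	 * handled per loop with 128b stores; with 16B descriptors
	 * (RTE_LIBRTE_I40E_16BYTE_RX_DESC) one 256b store covers two whole
	 * descriptors, so four mbufs are handled per loop.
	 */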
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
#else
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
			i += 4, rxep += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;
		mb2 = rxep[2].mbuf;
		mb3 = rxep[3].mbuf;

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

		/* merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3
		 */
		vaddr0_1 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr0), vaddr1, 1);
		vaddr2_3 = _mm256_inserti128_si256(
				_mm256_castsi128_si256(vaddr2), vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}
#endif

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
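
/* The packet length is in bits 38:51 of the descriptor's qword1 write-back.
 * Shifting each 32-bit lane left by PKTLEN_SHIFT (10) lines the field up on
 * a 16-bit lane boundary, so a single epi16 blend can place it where the
 * shuffle mask below expects to find pkt_len/data_len.
 */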
#define PKTLEN_SHIFT 10

static inline uint16_t
_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
#define RTE_I40E_DESCS_PER_LOOP_AVX 8

	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	rte_prefetch0(rxdp);

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* constants used in processing loop */
	const __m256i crc_adjust = _mm256_set_epi16(
			/* first descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0,          /* ignore pkt_type field */
			/* second descriptor */
			0, 0, 0,       /* ignore non-length fields */
			-rxq->crc_len, /* sub crc on data_len */
			0,             /* ignore high-16bits of pkt_len */
			-rxq->crc_len, /* sub crc on pkt_len */
			0, 0           /* ignore pkt_type field */
	);

	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			I40E_RX_DESC_STATUS_EOF_SHIFT);

	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk = _mm256_set_epi8(
			/* first descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			/* second descriptor */
			7, 6, 5, 4,  /* octet 4~7, 32bits rss */
			3, 2,        /* octet 2~3, low 16 bits vlan_macip */
			15, 14,      /* octet 15~14, 16 bits data_len */
			0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
			15, 14,      /* octet 15~14, low 16 bits pkt_len */
			0xFF, 0xFF,  /* pkt_type set as unknown */
			0xFF, 0xFF   /* pkt_type set as unknown */
	);
	/* compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Status/Error flag masks */
	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask = _mm256_set1_epi32(
			(1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
	/* data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * destination
	 */
	const __m256i vlan_flags_shuf = _mm256_set_epi32(
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
			0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
	/* data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf = _mm256_set_epi8(
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
			0, 0, PKT_RX_FDIR, 0);
	/* data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask = _mm256_set1_epi32(
			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
			PKT_RX_EIP_CKSUM_BAD);
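
	/* Main loop, 8 descriptors per iteration: copy out 8 mbuf pointers,
	 * load 8 descriptors, rearrange their fields into mbuf layout in
	 * 256-bit registers, compute ol_flags, then write each mbuf's
	 * rearm_data + rx_descriptor_fields1 area with one 32B store.
	 */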
	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
			i += RTE_I40E_DESCS_PER_LOOP_AVX,
			rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				_mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		/* on 64-bit, 8 mbuf pointers need a second 32B store */
		_mm256_storeu_si256((void *)&rx_pkts[i + 4],
				_mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
			raw_desc4_5 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
			raw_desc2_3 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
			raw_desc0_1 = _mm256_inserti128_si256(
					_mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
		}
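
		/* Note: the loads above run newest-to-oldest with compiler
		 * barriers in between, so descriptor N is read before all
		 * descriptors < N. Combined with the NIC's in-order
		 * write-back this makes the DD bits in the loaded copies a
		 * prefix: if descriptor N shows DD, every earlier one does
		 * too, which the popcount-based completion check below
		 * relies on.
		 */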

		if (split_packet) {
			int j;

			for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}

		/* convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/* to get packet types, shift 64-bit values down 30 bits
		 * so that the ptype is in the lower 8 bits of each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
				desc4_5);
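
		/* status4_7 gathers the status/error dwords of descriptors
		 * 4-7; the unpacklo further down keeps their low qwords and
		 * merges in descriptors 0-3, giving all 8 status words in
		 * one register (in the shuffled order noted below).
		 */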

		/* convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
				desc0_1);

		/* take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
				status0_3);

		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits = _mm256_and_si256(
				status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags = _mm256_shuffle_epi8(
				vlan_flags_shuf, flag_bits);
		const __m256i rss_flags = _mm256_shuffle_epi8(
				rss_flags_shuf, _mm256_srli_epi32(flag_bits, 11));
		/* l3_l4 error flags: shuffle, then shift left by 1 to undo
		 * the >>1 pre-shift applied to the table entries (to keep
		 * them under 255), and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		/* merge flags */
		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));

		/* At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init data
		 * so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend for
		 * each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
				rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 8), 0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(mbuf_flags, 4), 0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(mbuf_flags, 4), 0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);

		/* repeat for the odd mbufs */
		const __m256i odd_flags = _mm256_castsi128_si256(
				_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 8), 0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
				_mm256_slli_si256(odd_flags, 4), 0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
				_mm256_srli_si256(odd_flags, 4), 0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);

		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask = _mm_set1_epi16(
					1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
					eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits = _mm_packus_epi32(
					_mm256_castsi256_si128(eop_bits256),
					_mm256_extractf128_si256(eop_bits256, 1));
			/* flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
					eop_mask);
			/* eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 *  multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
					0xFF, 0xFF, 0xFF, 0xFF,
					8, 0, 10, 2,  /* move values to lo 64b */
					12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
		}

		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_extracti128_si256(status0_7, 1)));
		burst += __builtin_popcountll(_mm_cvtsi128_si64(
				_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
			break;
	}
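
	/* After the loop, received counts only descriptors whose DD bit was
	 * seen, so the tail and rearm bookkeeping below stays correct even
	 * when the loop exited early on a partial burst.
	 */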
	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}

/*
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 */
uint16_t
i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 */
static uint16_t
i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/*
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 */
uint16_t
i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}
	return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}
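
/* Write one 16B TX data descriptor: the low qword carries the buffer DMA
 * address, the high qword packs the descriptor type, command flags and the
 * buffer size.
 */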
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
			pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
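
/* Write nb_pkts TX descriptors: one descriptor first if needed to reach a
 * 32B-aligned position in the ring, then four packets per iteration using
 * two 256b stores, then any remainder singly.
 */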
static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do four packets at a time (two 32B descriptor stores) while
	 * possible, in bursts
	 */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 = hi_qw_tmpl |
				((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 = hi_qw_tmpl |
				((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 = hi_qw_tmpl |
				((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 = hi_qw_tmpl |
				((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m256i desc2_3 = _mm256_set_epi64x(
				hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off,
				hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off);
		__m256i desc0_1 = _mm256_set_epi64x(
				hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off,
				hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
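
/* Send a burst of at most tx_rs_thresh packets. If the burst wraps past the
 * end of the ring it is written in two pieces, and the RS (report status)
 * bit is set once per tx_rs_thresh descriptors so that completed mbufs can
 * later be freed in batches by i40e_tx_free_bufs().
 */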
static inline uint16_t
i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;

	/* crossing a tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		/* the burst wraps: fill to the end of the ring, setting RS
		 * on the last descriptor before the wrap
		 */
		tx_backlog_entry(txep, tx_pkts, n);

		vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
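
/* Transmit entry point: split arbitrarily large bursts into chunks of at
 * most tx_rs_thresh packets and hand each to the fixed-burst routine.
 */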
uint16_t
i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
		ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
						     num);