/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include "iavf_rxtx_vec_common.h"

#include <x86intrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void
iavf_rxq_rearm(struct iavf_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxp,
				 IAVF_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
				rxp[i] = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			IAVF_RXQ_REARM_THRESH;
		return;
	}
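
	/*
	 * Note on the failure path above: when the mempool cannot supply
	 * IAVF_RXQ_REARM_THRESH buffers and the ring is nearly fully
	 * unarmed, the loop parks the next sw_ring slots on the queue's
	 * fake_mbuf and zeroes the descriptor DMA addresses, so the NIC
	 * never DMA-writes through a stale pointer; the failure is only
	 * surfaced through the rx_mbuf_alloc_failed counter.
	 */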
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxp[0];
		mb1 = rxp[1];

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}
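
	/*
	 * Note: _mm_unpackhi_epi64(vaddr, vaddr) above duplicates the
	 * buf_physaddr qword into both 64-bit lanes, so after the headroom
	 * add, a single 128-bit store fills both the pkt_addr and hdr_addr
	 * words of the descriptor's read section.
	 */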
#else
	struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
	__m256i dma_addr0_1, dma_addr2_3;
	__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 4 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH;
			i += 4, rxp += 4, rxdp += 4) {
		__m128i vaddr0, vaddr1, vaddr2, vaddr3;
		__m256i vaddr0_1, vaddr2_3;

		mb0 = rxp[0];
		mb1 = rxp[1];
		mb2 = rxp[2];
		mb3 = rxp[3];

		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
		vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
		vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

		/*
		 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
		 * into the high lanes. Similarly for 2 & 3
		 */
		vaddr0_1 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
						vaddr1, 1);
		vaddr2_3 =
			_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
						vaddr3, 1);

		/* convert pa to dma_addr hdr/data */
		dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
		dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

		/* add headroom to pa values */
		dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
		dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

		/* flush desc with pa dma_addr */
		_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
		_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
	}
#endif
	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
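
/*
 * Editor's note: PKTLEN_SHIFT below shifts the packet-length field of the
 * legacy descriptor's qword1 left so that its low 16 bits land on a 16-bit
 * lane boundary; the blend and byte shuffle in the receive loop can then
 * copy the length straight into the mbuf's pkt_len/data_len slots.
 */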
#define PKTLEN_SHIFT 10

static inline uint16_t
_iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
			     struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts, uint8_t *split_packet)
{
#define IAVF_DESCS_PER_LOOP_AVX 8
	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;

	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
	const int avx_aligned = ((rxq->rx_tail & 1) == 0);

	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
	      rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
		return 0;
	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0,          /* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0           /* ignore pkt_type field */
			);
	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			IAVF_RX_DESC_STATUS_EOF_SHIFT);
	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 7, 6, 5, 4,    /* octet 4~7, 32bits rss */
			 3, 2,          /* octet 2~3, low 16 bits vlan_macip */
			 15, 14,        /* octet 15~14, 16 bits data_len */
			 0xFF, 0xFF,    /* skip high 16 bits pkt_len, zero out */
			 15, 14,        /* octet 15~14, low 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 /* second descriptor */
			 7, 6, 5, 4,    /* octet 4~7, 32bits rss */
			 3, 2,          /* octet 2~3, low 16 bits vlan_macip */
			 15, 14,        /* octet 15~14, 16 bits data_len */
			 0xFF, 0xFF,    /* skip high 16 bits pkt_len, zero out */
			 15, 14,        /* octet 15~14, low 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF     /* pkt_type set as unknown */
			);
	/*
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
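
	/*
	 * These layout checks matter because the byte shuffle writes
	 * pkt_type, pkt_len, data_len, vlan_tci and the hash as one
	 * contiguous 16-byte rx_descriptor_fields1 block; any change to
	 * the rte_mbuf layout would silently corrupt those fields.
	 */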
	/* Status/Error flag masks */
	/*
	 * mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication. Bits 3-5 of error
	 * field (bits 22-24) are for IP/L4 checksum errors
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((1 << 2) | (1 << 11) |
				  (3 << 12) | (7 << 22));
	/*
	 * data to be shuffled by result of flag mask. If VLAN bit is set,
	 * (bit 2), then position 4 in this array will be used in the
	 * shuffle.
	 */
	const __m256i vlan_flags_shuf =
		_mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
				 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
	/*
	 * data to be shuffled by result of flag mask, shifted down 11.
	 * If RSS/FDIR bits are set, shuffle moves appropriate flags in
	 * place.
	 */
	const __m256i rss_flags_shuf =
		_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
				PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
				0, 0, 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
				0, 0, 0, 0, 0, 0, 0, 0,
				PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
				0, 0, 0, 0, PKT_RX_FDIR, 0);
	/*
	 * data to be shuffled by the result of the flags mask shifted by 22
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
			 PKT_RX_L4_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
			PKT_RX_IP_CKSUM_BAD >> 1,
			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);

	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
				  PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
				  PKT_RX_EIP_CKSUM_BAD);
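
	/*
	 * The checksum flag values do not all fit in a byte, so the shuffle
	 * table stores them pre-shifted right by 1; the loop shifts the
	 * shuffled result left by 1 to restore the real flag values and
	 * then applies cksum_mask to drop any stray bits.
	 */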
	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */

	uint16_t i, received;
	for (i = 0, received = 0; i < nb_pkts;
	     i += IAVF_DESCS_PER_LOOP_AVX,
	     rxdp += IAVF_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		/* for AVX we need alignment otherwise loads are not atomic */
		if (avx_aligned) {
			/* load in descriptors, 2 at a time, in reverse order */
			raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
			rte_compiler_barrier();
			raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
			rte_compiler_barrier();
			raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
			rte_compiler_barrier();
			raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
		} else
#endif
		{
			const __m128i raw_desc7 =
				_mm_load_si128((void *)(rxdp + 7));
			rte_compiler_barrier();
			const __m128i raw_desc6 =
				_mm_load_si128((void *)(rxdp + 6));
			rte_compiler_barrier();
			const __m128i raw_desc5 =
				_mm_load_si128((void *)(rxdp + 5));
			rte_compiler_barrier();
			const __m128i raw_desc4 =
				_mm_load_si128((void *)(rxdp + 4));
			rte_compiler_barrier();
			const __m128i raw_desc3 =
				_mm_load_si128((void *)(rxdp + 3));
			rte_compiler_barrier();
			const __m128i raw_desc2 =
				_mm_load_si128((void *)(rxdp + 2));
			rte_compiler_barrier();
			const __m128i raw_desc1 =
				_mm_load_si128((void *)(rxdp + 1));
			rte_compiler_barrier();
			const __m128i raw_desc0 =
				_mm_load_si128((void *)(rxdp + 0));

			raw_desc6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc6),
					 raw_desc7, 1);
			raw_desc4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc4),
					 raw_desc5, 1);
			raw_desc2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc2),
					 raw_desc3, 1);
			raw_desc0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc0),
					 raw_desc1, 1);
		}
		if (split_packet) {
			int j;

			for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}
		/*
		 * convert descriptors 4-7 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7,
							 PKTLEN_SHIFT);
		const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5,
							 PKTLEN_SHIFT);
		const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7,
							   len6_7, 0x80);
		const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5,
							   len4_5, 0x80);
		__m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/*
		 * to get packet types, shift 64-bit values down 30 bits
		 * and so ptype is in lower 8-bits in each
		 */
		const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
		const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
		const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
		const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
		const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
		const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);

		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
								desc4_5);
		/*
		 * convert descriptors 0-3 into mbufs, adjusting length and
		 * re-arranging fields. Then write into the mbuf
		 */
		const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3,
							 PKTLEN_SHIFT);
		const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1,
							 PKTLEN_SHIFT);
		const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3,
							   len2_3, 0x80);
		const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1,
							   len0_1, 0x80);
		__m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/* get the packet types */
		const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
		const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
		const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
		const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
		const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
		const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);

		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
								desc0_1);
		/*
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
							  status0_3);
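
		/*
		 * The odd-looking order comes from _mm256_unpackhi/lo
		 * operating independently on each 128-bit lane, which
		 * interleaves descriptor pairs across lanes instead of
		 * keeping them sequential.
		 */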
		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/* set vlan and rss flags */
		const __m256i vlan_flags =
			_mm256_shuffle_epi8(vlan_flags_shuf, flag_bits);
		const __m256i rss_flags =
			_mm256_shuffle_epi8(rss_flags_shuf,
					    _mm256_srli_epi32(flag_bits, 11));
		/*
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 22));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);

		/* merge flags */
		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
				_mm256_or_si256(rss_flags, vlan_flags));
		/*
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
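
		/*
		 * Each 32-byte store above covers rearm_data, ol_flags and
		 * the whole rx_descriptor_fields1 block at once: the blend
		 * dropped the flags dword into place and the permute
		 * supplied the descriptor-derived fields in the high lane.
		 */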
		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);
		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 << IAVF_RX_DESC_STATUS_EOF_SHIFT);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/*
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
							      eop_mask);
			/*
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += IAVF_DESCS_PER_LOOP_AVX;
		}
		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != IAVF_DESCS_PER_LOOP_AVX)
			break;
	}
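
	/*
	 * The popcount of the packed DD bits counted how many of the 8
	 * descriptors in each iteration completed; a partial set means the
	 * received data ran out, which is what terminates the loop above.
	 */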
	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}
static inline __m256i
flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
	RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
	RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
	const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
						       PKT_RX_FDIR_ID);
	/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
	const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
	__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
					       fdir_mis_mask);
	/* this XOR op inverts the fdir_mask */
	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
	const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

	return fdir_flags;
}
static inline uint16_t
_iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
				      struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts, uint8_t *split_packet)
{
#define IAVF_DESCS_PER_LOOP_AVX 8

	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;

	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
			0, rxq->mbuf_initializer);
	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
	volatile union iavf_rx_flex_desc *rxdp =
		(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;

	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX);
	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
		iavf_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.status_error0 &
	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;
	/* constants used in processing loop */
	const __m256i crc_adjust =
		_mm256_set_epi16
			(/* first descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0,          /* ignore pkt_type field */
			 /* second descriptor */
			 0, 0, 0,       /* ignore non-length fields */
			 -rxq->crc_len, /* sub crc on data_len */
			 0,             /* ignore high-16bits of pkt_len */
			 -rxq->crc_len, /* sub crc on pkt_len */
			 0, 0           /* ignore pkt_type field */
			);
	/* 8 packets DD mask, LSB in each 32-bit value */
	const __m256i dd_check = _mm256_set1_epi32(1);

	/* 8 packets EOP mask, second-LSB in each 32-bit value */
	const __m256i eop_check = _mm256_slli_epi32(dd_check,
			IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
	/* mask to shuffle from desc. to mbuf (2 descriptors) */
	const __m256i shuf_msk =
		_mm256_set_epi8
			(/* first descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,    /* rss hash parsed separately */
			 11, 10,        /* octet 10~11, 16 bits vlan_macip */
			 5, 4,          /* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
			 5, 4,          /* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 /* second descriptor */
			 0xFF, 0xFF,
			 0xFF, 0xFF,    /* rss hash parsed separately */
			 11, 10,        /* octet 10~11, 16 bits vlan_macip */
			 5, 4,          /* octet 4~5, 16 bits data_len */
			 0xFF, 0xFF,    /* skip hi 16 bits pkt_len, zero out */
			 5, 4,          /* octet 4~5, 16 bits pkt_len */
			 0xFF, 0xFF,    /* pkt_type set as unknown */
			 0xFF, 0xFF     /* pkt_type set as unknown */
			);
	/*
	 * compile-time check the above crc and shuffle layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi
	 * calls above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
	/* Status/Error flag masks */
	/*
	 * mask everything except Checksum Reports, RSS indication
	 * and VLAN indication.
	 * bit6:4 for IP/L4 checksum errors.
	 * bit12 is for RSS indication.
	 * bit13 is for VLAN indication.
	 */
	const __m256i flags_mask =
		_mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13));
	/*
	 * data to be shuffled by the result of the flags mask shifted by 4
	 * bits. This gives us the l3_l4 flags.
	 */
	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it not exceed 255 */
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			/* second 128-bits */
			0, 0, 0, 0, 0, 0, 0, 0,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
			 PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
	const __m256i cksum_mask =
		_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
				  PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
				  PKT_RX_EIP_CKSUM_BAD);
	/*
	 * data to be shuffled by result of flag mask, shifted down 12.
	 * If RSS(bit12)/VLAN(bit13) are set,
	 * shuffle moves appropriate flags in place.
	 */
	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0,
			/* end up 128-bits */
			0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0,
			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			PKT_RX_RSS_HASH, 0);
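
	/*
	 * The shuffle index into this table is (status >> 12) & 0x3, i.e.
	 * bit0 = RSS, bit1 = VLAN, so entries 0-3 of each 16-byte half map
	 * to none / RSS only / VLAN only / both.
	 */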
	uint16_t i, received;

	for (i = 0, received = 0; i < nb_pkts;
	     i += IAVF_DESCS_PER_LOOP_AVX,
	     rxdp += IAVF_DESCS_PER_LOOP_AVX) {
		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
		_mm256_storeu_si256((void *)&rx_pkts[i],
				    _mm256_loadu_si256((void *)&sw_ring[i]));
#ifdef RTE_ARCH_X86_64
		_mm256_storeu_si256
			((void *)&rx_pkts[i + 4],
			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
#endif

		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
		const __m128i raw_desc7 =
			_mm_load_si128((void *)(rxdp + 7));
		rte_compiler_barrier();
		const __m128i raw_desc6 =
			_mm_load_si128((void *)(rxdp + 6));
		rte_compiler_barrier();
		const __m128i raw_desc5 =
			_mm_load_si128((void *)(rxdp + 5));
		rte_compiler_barrier();
		const __m128i raw_desc4 =
			_mm_load_si128((void *)(rxdp + 4));
		rte_compiler_barrier();
		const __m128i raw_desc3 =
			_mm_load_si128((void *)(rxdp + 3));
		rte_compiler_barrier();
		const __m128i raw_desc2 =
			_mm_load_si128((void *)(rxdp + 2));
		rte_compiler_barrier();
		const __m128i raw_desc1 =
			_mm_load_si128((void *)(rxdp + 1));
		rte_compiler_barrier();
		const __m128i raw_desc0 =
			_mm_load_si128((void *)(rxdp + 0));

		raw_desc6_7 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc6),
				 raw_desc7, 1);
		raw_desc4_5 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc4),
				 raw_desc5, 1);
		raw_desc2_3 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc2),
				 raw_desc3, 1);
		raw_desc0_1 =
			_mm256_inserti128_si256
				(_mm256_castsi128_si256(raw_desc0),
				 raw_desc1, 1);
		if (split_packet) {
			int j;

			for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++)
				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
		}
		/*
		 * convert descriptors 4-7 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb6_7 = _mm256_shuffle_epi8(raw_desc6_7, shuf_msk);
		__m256i mb4_5 = _mm256_shuffle_epi8(raw_desc4_5, shuf_msk);

		mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
		mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
		/*
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptype_mask =
			_mm256_set1_epi16(IAVF_RX_FLEX_DESC_PTYPE_M);
		const __m256i ptypes6_7 =
			_mm256_and_si256(raw_desc6_7, ptype_mask);
		const __m256i ptypes4_5 =
			_mm256_and_si256(raw_desc4_5, ptype_mask);
		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);

		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype7], 4);
		mb6_7 = _mm256_insert_epi32(mb6_7, type_table[ptype6], 0);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype5], 4);
		mb4_5 = _mm256_insert_epi32(mb4_5, type_table[ptype4], 0);
		/* merge the status bits into one register */
		const __m256i status4_7 = _mm256_unpackhi_epi32(raw_desc6_7,
								raw_desc4_5);
		/*
		 * convert descriptors 0-3 into mbufs, re-arrange fields.
		 * Then write into the mbuf.
		 */
		__m256i mb2_3 = _mm256_shuffle_epi8(raw_desc2_3, shuf_msk);
		__m256i mb0_1 = _mm256_shuffle_epi8(raw_desc0_1, shuf_msk);

		mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
		mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
		/*
		 * to get packet types, ptype is located in bit16-25
		 * of each 128bits
		 */
		const __m256i ptypes2_3 =
			_mm256_and_si256(raw_desc2_3, ptype_mask);
		const __m256i ptypes0_1 =
			_mm256_and_si256(raw_desc0_1, ptype_mask);
		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);

		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype3], 4);
		mb2_3 = _mm256_insert_epi32(mb2_3, type_table[ptype2], 0);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype1], 4);
		mb0_1 = _mm256_insert_epi32(mb0_1, type_table[ptype0], 0);
		/* merge the status bits into one register */
		const __m256i status0_3 = _mm256_unpackhi_epi32(raw_desc2_3,
								raw_desc0_1);
		/*
		 * take the two sets of status bits and merge to one
		 * After merge, the packets status flags are in the
		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
		 */
		__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
							  status0_3);
		/* now do flag manipulation */

		/* get only flag/error bits we want */
		const __m256i flag_bits =
			_mm256_and_si256(status0_7, flags_mask);
		/*
		 * l3_l4_error flags, shuffle, then shift to correct adjustment
		 * of flags in flags_shuf, and finally mask out extra bits
		 */
		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
				_mm256_srli_epi32(flag_bits, 4));
		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
		/* set rss and vlan flags */
		const __m256i rss_vlan_flag_bits =
			_mm256_srli_epi32(flag_bits, 12);
		const __m256i rss_vlan_flags =
			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
					    rss_vlan_flag_bits);

		/* merge flags */
		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
						     rss_vlan_flags);
		if (rxq->fdir_enabled) {
			const __m256i fdir_id4_7 =
				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);

			const __m256i fdir_id0_3 =
				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);

			const __m256i fdir_id0_7 =
				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);

			const __m256i fdir_flags =
				flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);

			/* merge with fdir_flags */
			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);

			/* write to mbuf: have to use scalar store here */
			rx_pkts[i + 0]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 3);

			rx_pkts[i + 1]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 7);

			rx_pkts[i + 2]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 2);

			rx_pkts[i + 3]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 6);

			rx_pkts[i + 4]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 1);

			rx_pkts[i + 5]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 5);

			rx_pkts[i + 6]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 0);

			rx_pkts[i + 7]->hash.fdir.hi =
				_mm256_extract_epi32(fdir_id0_7, 4);
		} /* if() on fdir_enabled */
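
		/*
		 * The scattered extract indices above follow from the two
		 * unpackhi steps, which leave the 8 flow IDs in fdir_id0_7
		 * in the interleaved order (hi->lo) [1, 3, 5, 7, 0, 2, 4, 6]
		 * rather than sequentially.
		 */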
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		/**
		 * needs to load 2nd 16B of each desc for RSS hash parsing,
		 * will cause performance drop to get into this context.
		 */
		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_RSS_HASH) {
			/* load bottom half of every 32B desc */
			const __m128i raw_desc_bh7 =
				_mm_load_si128
					((void *)(&rxdp[7].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh6 =
				_mm_load_si128
					((void *)(&rxdp[6].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh5 =
				_mm_load_si128
					((void *)(&rxdp[5].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh4 =
				_mm_load_si128
					((void *)(&rxdp[4].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh3 =
				_mm_load_si128
					((void *)(&rxdp[3].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh2 =
				_mm_load_si128
					((void *)(&rxdp[2].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh1 =
				_mm_load_si128
					((void *)(&rxdp[1].wb.status_error1));
			rte_compiler_barrier();
			const __m128i raw_desc_bh0 =
				_mm_load_si128
					((void *)(&rxdp[0].wb.status_error1));

			__m256i raw_desc_bh6_7 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh6),
					 raw_desc_bh7, 1);
			__m256i raw_desc_bh4_5 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh4),
					 raw_desc_bh5, 1);
			__m256i raw_desc_bh2_3 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh2),
					 raw_desc_bh3, 1);
			__m256i raw_desc_bh0_1 =
				_mm256_inserti128_si256
					(_mm256_castsi128_si256(raw_desc_bh0),
					 raw_desc_bh1, 1);

			/**
			 * to shift the 32b RSS hash value to the
			 * highest 32b of each 128b before mask
			 */
			__m256i rss_hash6_7 =
				_mm256_slli_epi64(raw_desc_bh6_7, 32);
			__m256i rss_hash4_5 =
				_mm256_slli_epi64(raw_desc_bh4_5, 32);
			__m256i rss_hash2_3 =
				_mm256_slli_epi64(raw_desc_bh2_3, 32);
			__m256i rss_hash0_1 =
				_mm256_slli_epi64(raw_desc_bh0_1, 32);

			__m256i rss_hash_msk =
				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
						 0xFFFFFFFF, 0, 0, 0);

			rss_hash6_7 = _mm256_and_si256
					(rss_hash6_7, rss_hash_msk);
			rss_hash4_5 = _mm256_and_si256
					(rss_hash4_5, rss_hash_msk);
			rss_hash2_3 = _mm256_and_si256
					(rss_hash2_3, rss_hash_msk);
			rss_hash0_1 = _mm256_and_si256
					(rss_hash0_1, rss_hash_msk);

			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
		} /* if() on RSS hash parsing */
#endif
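
		/*
		 * The RSS path shifts each 32-bit hash into the top dword of
		 * its 128-bit half - the bytes shuf_msk left zeroed for the
		 * hash field - so a plain OR merges it into mb0_1 .. mb6_7
		 * without disturbing the other mbuf fields.
		 */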
		/*
		 * At this point, we have the 8 sets of flags in the low 16-bits
		 * of each 32-bit value in mbuf_flags.
		 * We want to extract these, and merge them with the mbuf init
		 * data so we can do a single write to the mbuf to set the flags
		 * and all the other initialization fields. Extracting the
		 * appropriate flags means that we have to do a shift and blend
		 * for each mbuf before we do the write. However, we can also
		 * add in the previously computed rx_descriptor fields to
		 * make a single 256-bit write per mbuf
		 */
		/* check the structure matches expectations */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
				 offsetof(struct rte_mbuf, rearm_data) + 8);
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
				 RTE_ALIGN(offsetof(struct rte_mbuf,
						    rearm_data),
					   16));
		/* build up data and do writes */
		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
			rearm6, rearm7;
		rearm6 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 8),
					    0x04);
		rearm4 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(mbuf_flags, 4),
					    0x04);
		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
		rearm0 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(mbuf_flags, 4),
					    0x04);
		/* permute to add in the rx_descriptor e.g. rss fields */
		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
		/* write to mbuf */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
		/* repeat for the odd mbufs */
		const __m256i odd_flags =
			_mm256_castsi128_si256
				(_mm256_extracti128_si256(mbuf_flags, 1));
		rearm7 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 8),
					    0x04);
		rearm5 = _mm256_blend_epi32(mbuf_init,
					    _mm256_slli_si256(odd_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
		rearm1 = _mm256_blend_epi32(mbuf_init,
					    _mm256_srli_si256(odd_flags, 4),
					    0x04);
		/* since odd mbufs are already in hi 128-bits use blend */
		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
		/* again write to mbufs */
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);
		/* extract and record EOP bit */
		if (split_packet) {
			const __m128i eop_mask =
				_mm_set1_epi16(1 <<
					       IAVF_RX_FLEX_DESC_STATUS0_EOF_S);
			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
								     eop_check);
			/* pack status bits into a single 128-bit register */
			const __m128i eop_bits =
				_mm_packus_epi32
					(_mm256_castsi256_si128(eop_bits256),
					 _mm256_extractf128_si256(eop_bits256,
								  1));
			/*
			 * flip bits, and mask out the EOP bit, which is now
			 * a split-packet bit i.e. !EOP, rather than EOP one.
			 */
			__m128i split_bits = _mm_andnot_si128(eop_bits,
							      eop_mask);
			/*
			 * eop bits are out of order, so we need to shuffle them
			 * back into order again. In doing so, only use low 8
			 * bits, which acts like another pack instruction
			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
			 * [Since we use epi8, the 16-bit positions are
			 * multiplied by 2 in the eop_shuffle value.]
			 */
			__m128i eop_shuffle =
				_mm_set_epi8(/* zero hi 64b */
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     /* move values to lo 64b */
					     8, 0, 10, 2,
					     12, 4, 14, 6);
			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
			*(uint64_t *)split_packet =
				_mm_cvtsi128_si64(split_bits);
			split_packet += IAVF_DESCS_PER_LOOP_AVX;
		}
		/* perform dd_check */
		status0_7 = _mm256_and_si256(status0_7, dd_check);
		status0_7 = _mm256_packs_epi32(status0_7,
					       _mm256_setzero_si256());

		uint64_t burst = __builtin_popcountll
					(_mm_cvtsi128_si64
						(_mm256_extracti128_si256
							(status0_7, 1)));
		burst += __builtin_popcountll
				(_mm_cvtsi128_si64
					(_mm256_castsi256_si128(status0_7)));
		received += burst;
		if (burst != IAVF_DESCS_PER_LOOP_AVX)
			break;
	}
	/* update tail pointers */
	rxq->rx_tail += received;
	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
		rxq->rx_tail--;
		received--;
	}
	rxq->rxrearm_nb += received;
	return received;
}
/**
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
}
/**
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	return _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rx_queue, rx_pkts,
						     nb_pkts, NULL);
}
/**
 * vPMD receive routine that reassembles single burst of 32 scattered packets
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = rx_queue;
	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
							split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}
/**
 * vPMD receive routine that reassembles scattered packets.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
		uint16_t burst = iavf_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < IAVF_VPMD_RX_MAX_BURST)
			return retval;
	}
	return retval + iavf_recv_scattered_burst_vec_avx2(rx_queue,
				rx_pkts + retval, nb_pkts);
}
/**
 * vPMD receive routine that reassembles single burst of
 * 32 scattered packets for flex RxD
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
static uint16_t
iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = rx_queue;
	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq,
					rx_pkts, nb_pkts, split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (!rxq->pkt_first_seg &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (!rxq->pkt_first_seg) {
		/* find the first split flag, and only reassemble then */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
				      &split_flags[i]);
}
/**
 * vPMD receive routine that reassembles scattered packets for flex RxD.
 * Main receive routine that can handle arbitrary burst sizes
 * Notice:
 * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet
 */
uint16_t
iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
		uint16_t burst =
			iavf_recv_scattered_burst_vec_avx2_flex_rxd
			(rx_queue, rx_pkts + retval, IAVF_VPMD_RX_MAX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < IAVF_VPMD_RX_MAX_BURST)
			return retval;
	}
	return retval + iavf_recv_scattered_burst_vec_avx2_flex_rxd(rx_queue,
				rx_pkts + retval, nb_pkts);
}
static inline void
iavf_vtx1(volatile struct iavf_tx_desc *txdp,
	  struct rte_mbuf *pkt, uint64_t flags)
{
	uint64_t high_qw =
		(IAVF_TX_DESC_DTYPE_DATA |
		 ((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT) |
		 ((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
					    pkt->buf_physaddr + pkt->data_off);
	_mm_store_si128((__m128i *)txdp, descriptor);
}
static inline void
iavf_vtx(volatile struct iavf_tx_desc *txdp,
	 struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	const uint64_t hi_qw_tmpl = (IAVF_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << IAVF_TXD_QW1_CMD_SHIFT));

	/* if unaligned on 32-byte boundary, do one to align */
	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
		iavf_vtx1(txdp, *pkt, flags);
		nb_pkts--, txdp++, pkt++;
	}

	/* do 4 at a time while possible, in bursts */
	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
		uint64_t hi_qw3 =
			hi_qw_tmpl |
			((uint64_t)pkt[3]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw2 =
			hi_qw_tmpl |
			((uint64_t)pkt[2]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw1 =
			hi_qw_tmpl |
			((uint64_t)pkt[1]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);
		uint64_t hi_qw0 =
			hi_qw_tmpl |
			((uint64_t)pkt[0]->data_len <<
			 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT);

		__m256i desc2_3 =
			_mm256_set_epi64x
				(hi_qw3,
				 pkt[3]->buf_physaddr + pkt[3]->data_off,
				 hi_qw2,
				 pkt[2]->buf_physaddr + pkt[2]->data_off);
		__m256i desc0_1 =
			_mm256_set_epi64x
				(hi_qw1,
				 pkt[1]->buf_physaddr + pkt[1]->data_off,
				 hi_qw0,
				 pkt[0]->buf_physaddr + pkt[0]->data_off);
		_mm256_store_si256((void *)(txdp + 2), desc2_3);
		_mm256_store_si256((void *)txdp, desc0_1);
	}

	/* do any last ones */
	while (nb_pkts) {
		iavf_vtx1(txdp, *pkt, flags);
		txdp++, pkt++, nb_pkts--;
	}
}
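
/*
 * The single-descriptor step at the top of iavf_vtx exists to bring txdp
 * onto a 32-byte boundary, so that the two _mm256_store_si256 calls in the
 * main loop are aligned stores.
 */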
static inline uint16_t
iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
	volatile struct iavf_tx_desc *txdp;
	struct iavf_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	/* bit2 is reserved and must be set to 1 according to Spec */
	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;

	/* cross rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);

	if (txq->nb_free < txq->free_thresh)
		iavf_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		iavf_vtx(txdp, tx_pkts, n - 1, flags);
		tx_pkts += (n - 1);
		txdp += (n - 1);

		iavf_vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	iavf_vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->next_rs) {
		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
					 IAVF_TXD_QW1_CMD_SHIFT);
		txq->next_rs =
			(uint16_t)(txq->next_rs + txq->rs_thresh);
	}

	txq->tx_tail = tx_id;

	IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
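
/*
 * Completion reporting note: an RS (report status) bit is planted only
 * every rs_thresh descriptors via txq->next_rs, which bounds how often the
 * hardware has to write back TX completion status.
 */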
uint16_t
iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],