/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

#include <tmmintrin.h>

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

static inline void
ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
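
	/* Note (added for clarity): in the advanced RX descriptor read
	 * format, the packet buffer address occupies the low 64 bits and
	 * the header buffer address the high 64 bits, so hba_msk keeps
	 * only the packet address when ANDed in below.
	 */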

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rxep,
				 RTE_IXGBE_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)&rxdp[i].read,
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_IXGBE_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* set Header Buffer Address to zero */
		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}

#ifdef RTE_LIBRTE_SECURITY
static inline void
desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
	__m128i sterr, rearm, tmp_e, tmp_p;
	uint32_t *rearm0 = (uint32_t *)rx_pkts[0]->rearm_data + 2;
	uint32_t *rearm1 = (uint32_t *)rx_pkts[1]->rearm_data + 2;
	uint32_t *rearm2 = (uint32_t *)rx_pkts[2]->rearm_data + 2;
	uint32_t *rearm3 = (uint32_t *)rx_pkts[3]->rearm_data + 2;
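	/* Note (added for clarity): rearm_data plus two 32-bit words is the
	 * lower half of ol_flags (ol_flags sits 8 bytes after rearm_data,
	 * as the build-time checks in desc_to_olflags_v() assert), so these
	 * pointers let the security flags be OR-ed straight into ol_flags.
	 */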
	const __m128i ipsec_sterr_msk =
		_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP |
			       IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED);
	const __m128i ipsec_proc_msk =
		_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
	const __m128i ipsec_err_flag =
		_mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
			       PKT_RX_SEC_OFFLOAD);
	const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);

	rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
	sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
			      _mm_extract_epi32(descs[2], 2),
			      _mm_extract_epi32(descs[1], 2),
			      _mm_extract_epi32(descs[0], 2));
	sterr = _mm_and_si128(sterr, ipsec_sterr_msk);
	tmp_e = _mm_cmpeq_epi32(sterr, ipsec_sterr_msk);
	tmp_p = _mm_cmpeq_epi32(sterr, ipsec_proc_msk);
	sterr = _mm_or_si128(_mm_and_si128(tmp_e, ipsec_err_flag),
			     _mm_and_si128(tmp_p, ipsec_proc_flag));
	rearm = _mm_or_si128(rearm, sterr);
	*rearm0 = _mm_extract_epi32(rearm, 0);
	*rearm1 = _mm_extract_epi32(rearm, 1);
	*rearm2 = _mm_extract_epi32(rearm, 2);
	*rearm3 = _mm_extract_epi32(rearm, 3);
}
#endif

static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
	struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1, csum;
	__m128i rearm0, rearm1, rearm2, rearm3;

	/* mask everything except rss type */
	const __m128i rsstype_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x000F, 0x000F, 0x000F, 0x000F);

	/* mask the lower byte of ol_flags */
	const __m128i ol_flags_msk = _mm_set_epi16(
			0x0000, 0x0000, 0x0000, 0x0000,
			0x00FF, 0x00FF, 0x00FF, 0x00FF);

	/* map rss type to rss hash flag */
	const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
			0, 0, 0, PKT_RX_RSS_HASH,
			PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
			PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);

	/* mask everything except vlan present and l4/ip csum error */
	const __m128i vlan_csum_msk = _mm_set_epi16(
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		(IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
		IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
	/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
	const __m128i vlan_csum_map_lo = _mm_set_epi8(
		0, 0, 0, 0,
		vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		vlan_flags | PKT_RX_IP_CKSUM_GOOD,
		0, 0, 0, 0,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_GOOD);

	const __m128i vlan_csum_map_hi = _mm_set_epi8(
		0, 0, 0, 0,
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
		0, 0, 0, 0,
		0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
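
	/* Note (added for clarity): after the mask/shift/OR sequence below,
	 * each 16-bit lane of vtag1 holds a small 4-bit VP/IPE/L4E index,
	 * so the two tables above act as 16-entry byte lookup tables for
	 * _mm_shuffle_epi8().
	 */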

	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
	vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);

	/* csum bits are in the most significant bits; to use shuffle we
	 * need to shift them, changing the mask from 0xc000 to 0x0003.
	 */
	csum = _mm_srli_epi16(vtag1, 14);

	/* now OR the most significant 64 bits containing the checksum
	 * flags with the vlan present flags.
	 */
	csum = _mm_srli_si128(csum, 8);
	vtag1 = _mm_or_si128(csum, vtag1);

	/* convert VP, IPE, L4E to ol_flags */
	vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
	vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));

	vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
	vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
	vtag1 = _mm_or_si128(vtag0, vtag1);

	vtag1 = _mm_or_si128(ptype0, vtag1);

	/*
	 * At this point, we have the 4 sets of flags in the low 64-bits
	 * of vtag1 (4x16).
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);

	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

static inline uint32_t get_packet_type(int index,
		uint32_t pkt_info,
		uint32_t etqf_check,
		uint32_t tunnel_check)
{
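	/* Note (added for clarity): etqf_check and tunnel_check come from
	 * _mm_movemask_epi8(), which yields one bit per byte, so each
	 * packet's 32-bit lane contributes four bits and bit 1 of that
	 * group (0x02 << index * 4) is the byte carrying the flag of
	 * interest.
	 */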
	if (etqf_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP)))
		return RTE_PTYPE_UNKNOWN;

	if (tunnel_check & (0x02 << (index * RTE_IXGBE_DESCS_PER_LOOP))) {
		pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
		return ptype_table_tn[pkt_info];
	}

	pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
	return ptype_table[pkt_info];
}

static inline void
desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
{
	__m128i etqf_mask = _mm_set_epi64x(0x800000008000LL, 0x800000008000LL);
	__m128i ptype_mask = _mm_set_epi32(
		pkt_type_mask, pkt_type_mask, pkt_type_mask, pkt_type_mask);
	__m128i tunnel_mask =
		_mm_set_epi64x(0x100000001000LL, 0x100000001000LL);
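	/* Note (added for clarity): both masks are replicated per 32-bit
	 * lane: 0x8000 selects the ETQF-match bit of the packet type field
	 * and 0x1000 selects the tunnel packet type bit.
	 */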

	uint32_t etqf_check, tunnel_check, pkt_info;

	__m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
	__m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);

	/* interleave low 32 bits,
	 * now we have 4 ptypes in a XMM register
	 */
	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);

	/* create an etqf bitmask based on the etqf bit. */
	etqf_check = _mm_movemask_epi8(_mm_and_si128(ptype0, etqf_mask));

	/* shift right by IXGBE_PACKET_TYPE_SHIFT, and apply ptype mask */
	ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, IXGBE_PACKET_TYPE_SHIFT),
			       ptype_mask);

	/* create a tunnel bitmask based on the tunnel bit */
	tunnel_check = _mm_movemask_epi8(
		_mm_slli_epi32(_mm_and_si128(ptype0, tunnel_mask), 0x3));

	pkt_info = _mm_extract_epi32(ptype0, 0);
	rx_pkts[0]->packet_type =
		get_packet_type(0, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 1);
	rx_pkts[1]->packet_type =
		get_packet_type(1, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 2);
	rx_pkts[2]->packet_type =
		get_packet_type(2, pkt_info, etqf_check, tunnel_check);
	pkt_info = _mm_extract_epi32(ptype0, 3);
	rx_pkts[3]->packet_type =
		get_packet_type(3, pkt_info, etqf_check, tunnel_check);
}

/*
 * vPMD raw receive routine; it only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only scan RTE_IXGBE_MAX_RX_BURST
 *   descriptors for their DD bit
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union ixgbe_adv_rx_desc *rxdp;
	struct ixgbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
#ifdef RTE_LIBRTE_SECURITY
	uint8_t use_ipsec = rxq->using_ipsec;
#endif
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,       /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,             /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0           /* ignore pkt_type field */
			);
	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	 * call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;
	__m128i mbuf_init;
	uint8_t vlan_flags;

	/* nb_pkts has to be less than or equal to RTE_IXGBE_MAX_RX_BURST */
	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);

	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
		ixgbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.upper.status_error &
				rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip 32 bit pkt_type */
		0xFF, 0xFF
		);
	/* Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;

	/* A. load 4 packets' descriptors in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
= 0, nb_pkts_recd
= 0; pos
< nb_pkts
;
422 pos
+= RTE_IXGBE_DESCS_PER_LOOP
,
423 rxdp
+= RTE_IXGBE_DESCS_PER_LOOP
) {
424 __m128i descs
[RTE_IXGBE_DESCS_PER_LOOP
];
425 __m128i pkt_mb1
, pkt_mb2
, pkt_mb3
, pkt_mb4
;
426 __m128i zero
, staterr
, sterr_tmp1
, sterr_tmp2
;
427 /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
429 #if defined(RTE_ARCH_X86_64)

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load 4 pkts desc */
		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif

		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
		rte_compiler_barrier();
		/* B.1 load 2 mbuf pointers */
		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);

#ifdef RTE_LIBRTE_SECURITY
		if (unlikely(use_ipsec))
			desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);
#endif

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_IXGBE_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);

		/* C.4 calc available number of desc */
		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * vPMD receive routine; it only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only scan RTE_IXGBE_MAX_RX_BURST
 *   descriptors for their DD bit
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/*
 * vPMD receive routine that reassembles scattered packets.
 *
 * Notice:
 * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP: just return no packet
 * - nb_pkts > RTE_IXGBE_MAX_RX_BURST: only scan RTE_IXGBE_MAX_RX_BURST
 *   descriptors for their DD bit
 * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
 */
uint16_t
ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ixgbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
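	/* Note (added for clarity): split_flags holds RTE_IXGBE_MAX_RX_BURST
	 * bytes, so scanning it as four 64-bit words below covers the whole
	 * 32-byte array and is a cheap test for whether any packet in the
	 * burst was split.
	 */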
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
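	/* Note (added for clarity): the low 64 bits written below are the
	 * buffer DMA address; the high 64 bits pack cmd_type_len
	 * (flags | data_len) in the lower dword and olinfo_status with
	 * PAYLEN (pkt_len << 46, i.e. bit 14 of the upper dword) in the
	 * upper dword.
	 */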
	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
			flags | pkt->data_len,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)&txdp->read, descriptor);
}

static inline void
vtx(volatile union ixgbe_adv_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
	volatile union ixgbe_adv_tx_desc *txdp;
	struct ixgbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = DCMD_DTYP_FLAGS;
	uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ixgbe_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;
= txq
->tx_tail
;
660 txdp
= &txq
->tx_ring
[tx_id
];
661 txep
= &txq
->sw_ring_v
[tx_id
];
663 txq
->nb_tx_free
= (uint16_t)(txq
->nb_tx_free
- nb_pkts
);
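
	/* Note (added for clarity): n is the number of descriptor slots left
	 * before the end of the ring; when the burst would wrap, the first
	 * n packets are written up to the ring end and the remainder is
	 * committed from slot 0.
	 */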

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {

		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
			rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
		txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
			txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
	_ixgbe_rx_queue_release_mbufs_vec(rxq);
}

static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_free_swring_vec(txq);
}

static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
	_ixgbe_reset_tx_queue_vec(txq);
}

static const struct ixgbe_txq_ops vec_txq_ops = {
	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
	.free_swring = ixgbe_tx_free_swring,
	.reset = ixgbe_reset_tx_queue,
};

int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
	return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}