/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation.
 * Copyright(c) 2016-2018, Linaro Limited.
 */

#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"


#pragma GCC diagnostic ignored "-Wcast-qual"

static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	uint64x2_t dma_addr0, dma_addr1;
	uint64x2_t zero = vdupq_n_u64(0);
	uint64_t paddr;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (unlikely(rte_mempool_get_bulk(rxq->mp,
					  (void *)rxep,
					  RTE_I40E_RXQ_REARM_THRESH) < 0)) {
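		/* Mempool is out of mbufs: if (almost) the whole ring is
		 * pending rearm, park the fake mbuf in the next few sw_ring
		 * slots and zero those descriptors so the receive loop
		 * never sees a stale DD bit.
		 */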
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
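		/* vdupq_n_u64() replicates the buffer address into both
		 * lanes, so the descriptor's pkt_addr and hdr_addr fields
		 * are written with the same value (header split is not
		 * used by this path).
		 */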
		dma_addr0 = vdupq_n_u64(paddr);

		/* flush desc with pa dma_addr */
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
		dma_addr1 = vdupq_n_u64(paddr);
		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	rx_id = rxq->rxrearm_start - 1;

	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
		rxq->rxrearm_start = 0;
		rx_id = rxq->nb_rx_desc - 1;
	}

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	rte_io_wmb();
	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* NEON version of FDIR mark extraction for 4 32B descriptors at a time */
static inline uint32x4_t
descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
{
	/* 32B descriptors: Load 2nd half of descriptors for FDIR ID data */
	uint64x2_t desc0_qw23, desc1_qw23, desc2_qw23, desc3_qw23;
	desc0_qw23 = vld1q_u64((uint64_t *)&(rxdp + 0)->wb.qword2);
	desc1_qw23 = vld1q_u64((uint64_t *)&(rxdp + 1)->wb.qword2);
	desc2_qw23 = vld1q_u64((uint64_t *)&(rxdp + 2)->wb.qword2);
	desc3_qw23 = vld1q_u64((uint64_t *)&(rxdp + 3)->wb.qword2);

	/* FDIR ID data: move last u32 of each desc to 4 u32 lanes */
	uint32x4_t v_unpack_02, v_unpack_13;
	v_unpack_02 = vzipq_u32(vreinterpretq_u32_u64(desc0_qw23),
				vreinterpretq_u32_u64(desc2_qw23)).val[1];
	v_unpack_13 = vzipq_u32(vreinterpretq_u32_u64(desc1_qw23),
				vreinterpretq_u32_u64(desc3_qw23)).val[1];
	uint32x4_t v_fdir_ids = vzipq_u32(v_unpack_02, v_unpack_13).val[1];

	/* Extended Status: extract from each lower 32 bits, to u32 lanes */
	v_unpack_02 = vzipq_u32(vreinterpretq_u32_u64(desc0_qw23),
				vreinterpretq_u32_u64(desc2_qw23)).val[0];
	v_unpack_13 = vzipq_u32(vreinterpretq_u32_u64(desc1_qw23),
				vreinterpretq_u32_u64(desc3_qw23)).val[0];
	uint32x4_t v_flt_status = vzipq_u32(v_unpack_02, v_unpack_13).val[0];

	/* Shift u32 left and right to "mask away" bits not required.
	 * Data required is 4:5 (zero based), so left shift by 26 (32-6)
	 * and then right shift by 30 (32 - 2 bits required).
	 */
	v_flt_status = vshlq_n_u32(v_flt_status, 26);
	v_flt_status = vshrq_n_u32(v_flt_status, 30);

	/* Generate constant 1 in all u32 lanes */
	RTE_BUILD_BUG_ON(I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID != 1);
	uint32x4_t v_u32_one = vdupq_n_u32(1);

	/* Per desc mask, bits set if FDIR ID is valid */
	uint32x4_t v_fd_id_mask = vceqq_u32(v_flt_status, v_u32_one);

	/* Mask ID data to zero if the FD_ID bit not set in desc */
	v_fdir_ids = vandq_u32(v_fdir_ids, v_fd_id_mask);

	/* Store data to fdir.hi in mbuf */
	rx_pkt[0]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 3);

	/* Convert fdir_id_mask into a single bit, then shift as required for
	 * correct location in the mbuf->olflags
	 */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	v_fd_id_mask = vshrq_n_u32(v_fd_id_mask, 31);
	v_fd_id_mask = vshlq_n_u32(v_fd_id_mask, 13);

	/* The returned value must be combined into each mbuf. This is already
	 * being done for RSS and VLAN mbuf olflags, so return bits to OR in.
	 */
	return v_fd_id_mask;
}

#else /* 32 or 16B FDIR ID handling */

/* Handle 16B descriptor FDIR ID flag setting based on FLM(bit11). See scalar
 * driver for scalar implementation of the same functionality.
 */
static inline uint32x4_t
descs_to_fdir_16b(uint32x4_t fltstat, uint64x2_t descs[4], struct rte_mbuf **rx_pkt)
{
	/* Unpack filter-status data from descriptors */
	uint32x4_t v_tmp_02 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
					vreinterpretq_u32_u64(descs[2])).val[0];
	uint32x4_t v_tmp_13 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
					vreinterpretq_u32_u64(descs[3])).val[0];
	uint32x4_t v_fdir_ids = vzipq_u32(v_tmp_02, v_tmp_13).val[1];

	/* Generate 111 and 11 in each u32 lane */
	uint32x4_t v_111_mask = vdupq_n_u32(7);
	uint32x4_t v_11_mask = vdupq_n_u32(3);

	/* Compare and mask away FDIR ID data if bit not set */
	uint32x4_t v_u32_bits = vandq_u32(v_111_mask, fltstat);
	uint32x4_t v_fdir_id_mask = vceqq_u32(v_u32_bits, v_11_mask);
	v_fdir_ids = vandq_u32(v_fdir_id_mask, v_fdir_ids);

	/* Store data to fdir.hi in mbuf */
	rx_pkt[0]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = vgetq_lane_u32(v_fdir_ids, 3);

	/* Top lane ones mask for FDIR isolation */
	uint32x4_t v_desc_fdir_mask = {0, UINT32_MAX, 0, 0};

	/* Move fdir_id_mask to correct lane, zero RSS in mbuf if fdir hits */
	uint32x4_t v_zeros = {0, 0, 0, 0};
	uint32x4_t v_desc3_shift = vextq_u32(v_fdir_id_mask, v_zeros, 2);
	uint32x4_t v_desc3_mask = vandq_u32(v_desc_fdir_mask, v_desc3_shift);
	descs[3] = vreinterpretq_u64_u32(vbslq_u32(v_desc3_mask, v_zeros,
				vreinterpretq_u32_u64(descs[3])));

	uint32x4_t v_desc2_shift = vextq_u32(v_fdir_id_mask, v_zeros, 1);
	uint32x4_t v_desc2_mask = vandq_u32(v_desc_fdir_mask, v_desc2_shift);
	descs[2] = vreinterpretq_u64_u32(vbslq_u32(v_desc2_mask, v_zeros,
				vreinterpretq_u32_u64(descs[2])));

	uint32x4_t v_desc1_shift = v_fdir_id_mask;
	uint32x4_t v_desc1_mask = vandq_u32(v_desc_fdir_mask, v_desc1_shift);
	descs[1] = vreinterpretq_u64_u32(vbslq_u32(v_desc1_mask, v_zeros,
				vreinterpretq_u32_u64(descs[1])));

	uint32x4_t v_desc0_shift = vextq_u32(v_zeros, v_fdir_id_mask, 3);
	uint32x4_t v_desc0_mask = vandq_u32(v_desc_fdir_mask, v_desc0_shift);
	descs[0] = vreinterpretq_u64_u32(vbslq_u32(v_desc0_mask, v_zeros,
				vreinterpretq_u32_u64(descs[0])));

	/* Shift to 1 or 0 bit per u32 lane, then to RTE_MBUF_F_RX_FDIR_ID offset */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
	uint32x4_t v_mask_one_bit = vshrq_n_u32(v_fdir_id_mask, 31);
	return vshlq_n_u32(v_mask_one_bit, 13);
}
#endif

static inline void
desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
		  uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
{
	uint32x4_t vlan0, vlan1, rss, l3_l4e;
	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
	uint64x2_t rearm0, rearm1, rearm2, rearm3;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const uint32x4_t rss_vlan_msk = {
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};

	const uint32x4_t cksum_mask = {
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};

	/* map rss and vlan type to rss hash and vlan flag */
	const uint8x16_t vlan_flags = {
			0, 0, 0, 0,
			RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, 0};

	const uint8x16_t rss_flags = {
			0, RTE_MBUF_F_RX_FDIR, 0, 0,
			0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
			0, 0, 0, 0,
			0, 0, 0, 0};

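	/* Checksum flags are stored right-shifted by one so that each table
	 * entry fits in a single byte; they are shifted back after the
	 * vqtbl1q_u8() lookup below.
	 */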
	const uint8x16_t l3_l4e_flags = {
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
			RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
			 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			0, 0, 0, 0, 0, 0, 0, 0};

	vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
			  vreinterpretq_u32_u64(descs[2])).val[1];
	vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
			  vreinterpretq_u32_u64(descs[3])).val[1];
	vlan0 = vzipq_u32(vlan0, vlan1).val[0];

	vlan1 = vandq_u32(vlan0, rss_vlan_msk);
	vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
						vreinterpretq_u8_u32(vlan1)));

	const uint32x4_t desc_fltstat = vshrq_n_u32(vlan1, 11);
	rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
					      vreinterpretq_u8_u32(desc_fltstat)));

	l3_l4e = vshrq_n_u32(vlan1, 22);
	l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
						 vreinterpretq_u8_u32(l3_l4e)));
	/* then we shift left 1 bit */
	l3_l4e = vshlq_n_u32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = vandq_u32(l3_l4e, cksum_mask);

	vlan0 = vorrq_u32(vlan0, rss);
	vlan0 = vorrq_u32(vlan0, l3_l4e);

	/* Extract FDIR ID only if FDIR is enabled to avoid useless work */
	if (rxq->fdir_enabled) {
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		uint32x4_t v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
		(void)rxdp; /* rxdp not required for 16B desc mode */
		uint32x4_t v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat, descs, rx_pkts);
#endif
		/* OR in ol_flag bits after descriptor specific extraction */
		vlan0 = vorrq_u32(vlan0, v_fdir_ol_flags);
	}

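	/* Each mbuf is rearmed with one 16-byte store: lane 0 carries
	 * rxq->mbuf_initializer (data_off/refcnt/nb_segs/port) and lane 1
	 * carries the ol_flags computed above.
	 */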
	rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
	rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
	rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
	rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);

	vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
	vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
	vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
	vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}

#define PKTLEN_SHIFT 10
#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))

static inline void
desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__rte_restrict rx_pkts,
		uint32_t *__rte_restrict ptype_tbl)
{
	int i;
	uint8_t ptype;
	uint8x16_t tmp;

	for (i = 0; i < 4; i++) {
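		/* The 8-bit ptype lives at bits 30..37 of qword1; shifting
		 * both 64-bit lanes right by 30 drops it into the low byte
		 * of the upper lane, i.e. byte lane 8 of the vector.
		 */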
		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
		ptype = vgetq_lane_u8(tmp, 8);
		rx_pkts[i]->packet_type = ptype_tbl[ptype];
	}

}

/**
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP.
 *
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
 * - nb_pkts is floor-aligned to a multiple of RTE_I40E_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq,
		   struct rte_mbuf **__rte_restrict rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	/* mask to shuffle from desc. to mbuf */
	uint8x16_t shuf_msk = {
		0xFF, 0xFF,   /* pkt_type set as unknown */
		0xFF, 0xFF,   /* pkt_type set as unknown */
		14, 15,       /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
		14, 15,       /* octet 15~14, 16 bits data_len */
		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
		4, 5, 6, 7    /* octet 4~7, 32bits rss */
		};

	uint8x16_t eop_check = {
		0x02, 0x00, 0x02, 0x00,
		0x02, 0x00, 0x02, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
		};

	uint16x8_t crc_adjust = {
		0, 0,         /* ignore pkt_type field */
		rxq->crc_len, /* sub crc on pkt_len */
		0,            /* ignore high-16bits of pkt_len */
		rxq->crc_len, /* sub crc on data_len */
		0, 0, 0       /* ignore non-length fields */
		};

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch_non_temporal(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packets in one loop
	 * [A*. mask out 4 unused dirty fields in desc]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		uint16x8x2_t sterr_tmp1, sterr_tmp2;
		uint64x2_t mbp1, mbp2;
		uint16x8_t staterr;
		uint16x8_t tmp;
		uint64_t stat;

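		/* Only lane 3 (the upper 32 bits of qword1, which holds the
		 * pkt_len field) gets shifted by PKTLEN_SHIFT; the other
		 * lanes are shifted by zero.
		 */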
		int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};

		/* A.1 load desc[3-0] */
		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
		descs[0] = vld1q_u64((uint64_t *)(rxdp));

		/* Use acquire fence to order loads of descriptor qwords */
		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* A.2 reload qword0 to make it ordered after qword1 load */
		descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
		descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
		descs[1] = vld1q_lane_u64((uint64_t *)(rxdp + 1), descs[1], 0);
		descs[0] = vld1q_lane_u64((uint64_t *)(rxdp), descs[0], 0);

		/* B.1 load 4 mbuf pointers */
		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

		/* B.2 copy 4 mbuf pointers into rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* shift the pktlen field of each descriptor so it lands on a
		 * 16-bit boundary where the shuffle mask below expects it
		 */
		uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
					    len_shl);
		descs[3] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len3), 7),
				 vreinterpretq_u16_u64(descs[3]),
				 7));
		uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
					    len_shl);
		descs[2] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len2), 7),
				 vreinterpretq_u16_u64(descs[2]),
				 7));
		uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
					    len_shl);
		descs[1] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len1), 7),
				 vreinterpretq_u16_u64(descs[1]),
				 7));
		uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
					    len_shl);
		descs[0] = vreinterpretq_u64_u16(vsetq_lane_u16
				(vgetq_lane_u16(vreinterpretq_u16_u32(len0), 7),
				 vreinterpretq_u16_u64(descs[0]),
				 7));

		desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);

		/* D.1 convert the descriptor format to the pktmbuf layout */
		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

		/* D.2 pkts set in_port/nb_seg and remove crc */
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
		pkt_mb4 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
		pkt_mb3 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
		pkt_mb2 = vreinterpretq_u8_u16(tmp);
		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
		pkt_mb1 = vreinterpretq_u8_u16(tmp);

		/* D.3 copy final data to rx_pkts */
		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
				pkt_mb4);
		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
				pkt_mb3);
		vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
				pkt_mb2);
		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);

		if (likely(pos + RTE_I40E_DESCS_PER_LOOP < nb_pkts)) {
			rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
		}

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
				       vreinterpretq_u16_u64(descs[3]));
		sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
				       vreinterpretq_u16_u64(descs[2]));

		/* C.2 get 4 pkts staterr value */
		staterr = vzipq_u16(sterr_tmp1.val[1],
				    sterr_tmp2.val[1]).val[0];

		/* C* extract and record EOP bit */
		if (split_packet) {
			uint8x16_t eop_shuf_mask = {
					0x00, 0x02, 0x04, 0x06,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF};
			uint8x16_t eop_bits;

			/* and with mask to extract bits, flipping 1-0 */
			eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
			eop_bits = vandq_u8(eop_bits, eop_check);
			/* the staterr values are not in order; that does not
			 * matter when counting DD bits, but it does for end of
			 * packet tracking, so shuffle. This also compresses
			 * the 32-bit values to 8-bit
			 */
			eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);

			/* store the resulting 32-bit value */
			vst1q_lane_u32((uint32_t *)split_packet,
				       vreinterpretq_u32_u8(eop_bits), 0);
			split_packet += RTE_I40E_DESCS_PER_LOOP;

			/* zero-out next pointers */
			rx_pkts[pos]->next = NULL;
			rx_pkts[pos + 1]->next = NULL;
			rx_pkts[pos + 2]->next = NULL;
			rx_pkts[pos + 3]->next = NULL;
		}

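		/* C.3 smear each descriptor's DD bit across its 16-bit lane:
		 * shift DD (bit 0) up to the sign bit, then arithmetic-shift
		 * it back so a lane is all-ones when DD is set.
		 */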
		staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1);
		staterr = vreinterpretq_u16_s16(
				vshrq_n_s16(vreinterpretq_s16_u16(staterr),
					    I40E_UINT16_BIT - 1));
		stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);

		/* C.4 calc available number of desc */
		if (unlikely(stat == 0)) {
			nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
		} else {
			nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT;
			break;
		}
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
 */
uint16_t
i40e_recv_pkts_vec(void *__rte_restrict rx_queue,
		   struct rte_mbuf **__rte_restrict rx_pkts, uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 *
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
 */
static uint16_t
i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{

	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
	    split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst;

		burst = i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}

	return retval + i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      nb_pkts);
}

static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
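	/* qword1 carries the descriptor type, command flags and buffer
	 * length; qword0 carries the buffer DMA address. Both are written
	 * with a single 16-byte store.
	 */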
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw};
	vst1q_u64((uint64_t *)txdp, descriptor);
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkt,
		uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
			  struct rte_mbuf **__rte_restrict tx_pkts, uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
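	/* If the burst does not fit before the end of the ring, fill the
	 * descriptors up to the wrap point (marking the last one with RS)
	 * and continue from index 0 below.
	 */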
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
						I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	rte_io_wmb();
	I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);

	return nb_pkts;
}

void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}