/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

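/* Populate a contiguous frame descriptor (FD) from a single-segment mbuf:
 * set the buffer IOVA, length, buffer pool id and data offset, and clear
 * the FRC, CTRL and FLC fields so no stale state is handed to hardware.
 */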
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)

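/* Fast Rx parser for LX2160A: on LX2 the hardware parse summary is carried
 * in the FRC field of the FD, so common packet types can be set without
 * reading the annotation area. Unrecognized FRC values fall back to the
 * annotation-based slow parser.
 */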
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
	struct dpaa2_annot_hdr *annotation;
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m,
		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			   + DPAA2_FD_PTA_SIZE));
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) {
		annotation = (struct dpaa2_annot_hdr *)
			((size_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE);
		m->timestamp = annotation->word2;
		m->ol_flags |= PKT_RX_TIMESTAMP;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}

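/* Slow Rx parser: walk the hardware annotation words to derive the packet
 * type, extract the VLAN TCI, and flag bad L3/L4 checksums when the fast
 * paths cannot fully classify the frame.
 */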
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			   "(4)=0x%" PRIx64 "\t",
			   annotation->word3, annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

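/* Default Rx parser for non-LX2 platforms: record checksum status and the
 * Rx timestamp, classify the common IPv4/IPv6 TCP/UDP cases directly from
 * annotation word4, and defer everything else to the slow parser.
 */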
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	mbuf->ol_flags |= PKT_RX_TIMESTAMP;
	mbuf->timestamp = annotation->word2;
	DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}

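/* Convert a scatter/gather FD back into an mbuf chain: the first SGE
 * becomes the head segment and carries pkt_len and the parse results,
 * each following SGE is linked in as another segment, and the buffer
 * that held the S/G table itself is freed at the end.
 */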
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd);
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

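/* Convert a contiguous FD back into the inline mbuf that lives in the
 * same hardware buffer, refreshing the fields the previous transmission
 * may have changed and running the platform-specific Rx parser.
 */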
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in the last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results follow the
	 * private software annotation area.
	 */

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

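/* Build a scatter/gather FD from a multi-segment mbuf. A buffer freshly
 * allocated from the mbuf's pool holds the S/G table, and each segment
 * becomes one SGE. Buffers that are still referenced elsewhere keep an
 * invalid bpid so hardware will not free them on transmit completion.
 */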
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

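/* Build a contiguous FD from a single-segment mbuf and hand buffer
 * ownership to hardware; when the buffer (or its owner, for indirect
 * mbufs) is still referenced, mark the FD with an invalid bpid so the
 * buffer survives the transmit.
 */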
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}

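/* Fallback Tx path for mbufs that do not come from a DPAA2-managed pool:
 * allocate a hardware buffer, copy the packet data and the relevant
 * metadata into it, and build the FD from that copy.
 */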
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls per queue. If that is not the case, it is better to use
 * the non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		if (dpaa2_svr_family != SVR_LX2160A) {
			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
		}

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = eth_data->port_id;

		if (eth_data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

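/* Fill an rte_event for a frame dequeued under the parallel schedule
 * type: copy the static event fields configured on the Rx queue, convert
 * the FD to an mbuf, and consume the DQRR entry immediately since no
 * ordering or atomicity context needs to be held.
 */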
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

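/* Atomic variant: the DQRR entry is not consumed here. Its index is
 * stashed in the mbuf's seqn and tracked per lcore so the entry can be
 * consumed later (e.g. via the DCA flag on a subsequent enqueue),
 * preserving atomicity of the flow.
 */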
void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

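/* Ordered variant: encode the ODP id and sequence number from the
 * dequeue result into the mbuf's seqn so that a later enqueue can
 * restore the original frame order, then consume the DQRR entry.
 */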
void __attribute__((hot))
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
					&fd_arr[loop], mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

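/* Free the mbuf behind the FD stored at the given index of the per-lcore
 * enqueue response ring, so the driver, rather than hardware, releases
 * frames whose confirmed (ORP) enqueue did not complete normally.
 */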
void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	m = eth_fd_to_mbuf(fd);
	rte_pktmbuf_free(m);
}

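/* Prepare a per-packet enqueue descriptor for atomic/ordered traffic:
 * ORP-flagged mbufs get an order-restoration descriptor (optionally with
 * an enqueue response when loose ordering is disabled), while DQRR-held
 * mbufs get a descriptor that consumes their DQRR entry on enqueue.
 */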
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id,
			     dpaa2_q->tc_index);

	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = m->seqn - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	m->seqn = DPAA2_INVALID_MBUF_SEQN;
}

/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);

			if ((*bufs)->seqn) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid,
						     dpaa2_q->flow_id,
						     dpaa2_q->tc_index);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[i],
							     &fd_arr[i],
							     loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;
	return 0;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

/* This function loops back all the received packets. */
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback */

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif