/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Ring handling
 */

static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
	struct rte_mbuf *data;

	data = rte_mbuf_raw_alloc(mb);

	return data;
}

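/*
 * Allocate a fresh mbuf for RX ring slot 'prod' and program its DMA
 * address into the corresponding hardware descriptor so the NIC can
 * fill it with packet data.
 */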
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *mbuf;

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}

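/*
 * Counterpart of bnxt_alloc_rx_data() for the aggregation (AGG) ring,
 * which supplies the extra buffers used for jumbo frames and TPA.
 */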
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
				     struct bnxt_rx_ring_info *rxr,
				     uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
	struct rte_mbuf *mbuf;

	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!mbuf) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return -ENOMEM;
	}

	/* Bail out instead of dereferencing a NULL descriptor/buffer slot. */
	if (rxbd == NULL || rx_buf == NULL) {
		PMD_DRV_LOG(ERR, "Jumbo Frame. %s is NULL\n",
			    rxbd == NULL ? "rxbd" : "rx_buf");
		rte_mbuf_raw_free(mbuf);
		return -EINVAL;
	}

	rx_buf->mbuf = mbuf;
	mbuf->data_off = RTE_PKTMBUF_HEADROOM;

	rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	return 0;
}

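/*
 * Return an already-allocated mbuf to the next free producer slot of
 * the RX ring and advance the ring's producer index.
 */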
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
				      struct rte_mbuf *mbuf)
{
	uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
	struct bnxt_sw_rx_bd *prod_rx_buf;
	struct rx_prod_pkt_bd *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];

	RTE_ASSERT(prod_rx_buf->mbuf == NULL);
	RTE_ASSERT(mbuf != NULL);

	prod_rx_buf->mbuf = mbuf;

	prod_bd = &rxr->rx_desc_ring[prod];

	prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

	rxr->rx_prod = prod;
}

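/*
 * Detach and return the mbuf at consumer index 'cons', leaving the
 * software ring slot empty for a subsequent refill.
 */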
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
				     uint16_t cons)
{
	struct bnxt_sw_rx_bd *cons_rx_buf;
	struct rte_mbuf *mbuf;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	RTE_ASSERT(cons_rx_buf->mbuf != NULL);
	mbuf = cons_rx_buf->mbuf;
	cons_rx_buf->mbuf = NULL;
	return mbuf;
}

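/*
 * Handle a TPA (the hardware's LRO) "start" completion: the mbuf that
 * received the first chunk becomes the head of the aggregated packet
 * and is parked in tpa_info[agg_id] until the matching "end" completion
 * arrives. The descriptors consumed here are recycled back to the ring.
 */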
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
			   struct rx_tpa_start_cmpl *tpa_start,
			   struct rx_tpa_start_cmpl_hi *tpa_start1)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
		RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
	uint16_t data_cons;
	struct bnxt_tpa_info *tpa_info;
	struct rte_mbuf *mbuf;

	data_cons = tpa_start->opaque;
	tpa_info = &rxr->tpa_info[agg_id];

	mbuf = bnxt_consume_rx_buf(rxr, data_cons);

	bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

	tpa_info->mbuf = mbuf;
	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = PKT_RX_LRO;
	if (likely(tpa_start->flags_type &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}
	if (tpa_start1->flags2 &
	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}
	if (likely(tpa_start1->flags2 &
		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* recycle next mbuf */
	data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}

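/*
 * Check whether all 'agg_bufs' aggregation completions that follow a
 * packet completion have been written by the hardware, i.e. whether
 * the last one already carries the expected valid bit.
 */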
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
			       uint8_t agg_bufs, uint32_t raw_cp_cons)
{
	uint16_t last_cp_cons;
	struct rx_pkt_cmpl *agg_cmpl;

	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
	cpr->valid = FLIP_VALID(raw_cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);
	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}

/* TPA consumes agg buffers out of order; allocate contiguous freed slots only */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

	/* TODO batch allocation for better performance */
	while (rte_bitmap_get(rxr->ag_bitmap, next)) {
		if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
			PMD_DRV_LOG(ERR,
				    "agg mbuf alloc failed: prod=0x%x\n", next);
			break;
		}
		rte_bitmap_clear(rxr->ag_bitmap, next);
		rxr->ag_prod = next;
		next = RING_NEXT(rxr->ag_ring_struct, next);
	}

	return 0;
}

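/*
 * Chain the 'agg_buf' aggregation buffers belonging to a packet onto
 * 'mbuf', advancing the completion cursor past their completions and
 * marking the consumed AGG slots in the bitmap for later refill.
 */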
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
			 struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
			 uint8_t agg_buf)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	int i;
	uint16_t cp_cons, ag_cons;
	struct rx_pkt_cmpl *rxcmp;
	struct rte_mbuf *last = mbuf;

	for (i = 0; i < agg_buf; i++) {
		struct bnxt_sw_rx_bd *ag_buf;
		struct rte_mbuf *ag_mbuf;
		*tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
		cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
		rxcmp = (struct rx_pkt_cmpl *)
			&cpr->cp_desc_ring[cp_cons];

#ifdef BNXT_DEBUG
		bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

		ag_cons = rxcmp->opaque;
		RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
		ag_buf = &rxr->ag_buf_ring[ag_cons];
		ag_mbuf = ag_buf->mbuf;
		RTE_ASSERT(ag_mbuf != NULL);

		ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

		mbuf->nb_segs++;
		mbuf->pkt_len += ag_mbuf->data_len;

		last->next = ag_mbuf;
		last = ag_mbuf;

		ag_buf->mbuf = NULL;

		/*
		 * Since the TPA module consumes aggregation buffers out of
		 * order, use a bitmap to track the freed slots that still
		 * need to be re-allocated and advertised to the NIC.
		 */
		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
	}
	bnxt_prod_ag_mbuf(rxq);
	return 0;
}

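/*
 * Handle a TPA "end" completion: finish the aggregated packet parked
 * by bnxt_tpa_start(), attach any trailing aggregation buffers, and
 * replace the parked mbuf so the agg_id slot is ready for reuse.
 */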
static inline struct rte_mbuf *bnxt_tpa_end(
		struct bnxt_rx_queue *rxq,
		uint32_t *raw_cp_cons,
		struct rx_tpa_end_cmpl *tpa_end,
		struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
			>> RX_TPA_END_CMPL_AGG_ID_SFT;
	struct rte_mbuf *mbuf;
	uint8_t agg_bufs;
	struct bnxt_tpa_info *tpa_info;

	tpa_info = &rxr->tpa_info[agg_id];
	mbuf = tpa_info->mbuf;
	RTE_ASSERT(mbuf != NULL);

	rte_prefetch0(mbuf);
	agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
		RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
			return NULL;
		bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
	}
	mbuf->l4_len = tpa_end->payload_offset;

	struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
	RTE_ASSERT(new_data != NULL);
	if (!new_data) {
		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
		return NULL;
	}
	tpa_info->mbuf = new_data;

	return mbuf;
}

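/*
 * Derive the rte_mbuf packet type from the completion flags: VLAN
 * presence selects the L2 type, the tunnel-checksum and IP-type bits
 * select inner/outer IPv4/IPv6, and the itype field selects the L4
 * protocol.
 */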
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t l3, pkt_type = 0;
	uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
	uint32_t flags_type;

	vlan = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	t_ipcs = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
	ip6 = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

	flags_type = rxcmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

	if (!t_ipcs && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_ipcs && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_ipcs && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	return pkt_type;
}

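/*
 * Process one RX completion. Returns 0 and stores a finished mbuf in
 * *rx_pkt, -EBUSY when the completion is not fully written yet,
 * -EINVAL for completions that yield no packet (e.g. TPA start), and
 * -ENOMEM when the replacement buffer allocation fails.
 */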
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
			    struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct rx_pkt_cmpl *rxcmp;
	struct rx_pkt_cmpl_hi *rxcmp1;
	uint32_t tmp_raw_cons = *raw_cons;
	uint16_t cons, prod, cp_cons =
	    RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	struct rte_mbuf *mbuf;
	int rc = 0;
	uint8_t agg_buf = 0;
	uint16_t cmp_type;

	rxcmp = (struct rx_pkt_cmpl *)
	    &cpr->cp_desc_ring[cp_cons];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
		return -EBUSY;

	cpr->valid = FLIP_VALID(cp_cons,
				cpr->cp_ring_struct->ring_mask,
				cpr->valid);

	cmp_type = CMP_TYPE(rxcmp);
	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
		rc = -EINVAL; /* Continue w/o new mbuf */
		goto next_rx;
	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
		mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
				    (struct rx_tpa_end_cmpl *)rxcmp,
				    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
		if (unlikely(!mbuf))
			return -EBUSY;
		*rx_pkt = mbuf;
		goto next_rx;
	} else if (cmp_type != 0x11) {
		/* Not an RX L2 completion (type 0x11); nothing to receive */
		rc = -EINVAL;
		goto next_rx;
	}

	agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
			>> RX_PKT_CMPL_AGG_BUFS_SFT;
	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
		return -EBUSY;

	prod = rxr->rx_prod;

	cons = rxcmp->opaque;
	mbuf = bnxt_consume_rx_buf(rxr, cons);
	if (mbuf == NULL)
		return -EBUSY;

	rte_prefetch0(mbuf);

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->nb_segs = 1;
	mbuf->next = NULL;
	mbuf->pkt_len = rxcmp->len;
	mbuf->data_len = mbuf->pkt_len;
	mbuf->port = rxq->port_id;
	mbuf->ol_flags = 0;
	if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		mbuf->hash.rss = rxcmp->rss_hash;
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
	} else {
		mbuf->hash.fdir.id = rxcmp1->cfa_code;
		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
	}

	if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
	     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;

	if (agg_buf)
		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);

	if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		mbuf->vlan_tci = rxcmp1->metadata &
			(RX_PKT_CMPL_METADATA_VID_MASK |
			RX_PKT_CMPL_METADATA_DE |
			RX_PKT_CMPL_METADATA_PRI_MASK);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
	}

	if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	else
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

#ifdef BNXT_DEBUG
	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
		/* Re-install the mbuf back to the rx ring */
		bnxt_reuse_rx_mbuf(rxr, mbuf);

		rc = -EIO;
		goto next_rx;
	}
#endif
	/*
	 * TODO: Redesign this....
	 * If the allocation fails, the packet does not get received.
	 * Simply returning this will result in slowly falling behind
	 * on the producer ring buffers.
	 * Instead, "filling up" the producer just before ringing the
	 * doorbell could be a better solution since it will let the
	 * producer ring starve until memory is available again, pushing
	 * the drops into hardware and getting them out of the driver,
	 * allowing recovery to a full producer ring.
	 *
	 * This could also help with cache usage by preventing per-packet
	 * calls in favour of a tight loop with the same function being
	 * called in it.
	 */
	prod = RING_NEXT(rxr->rx_ring_struct, prod);
	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
		PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
		rc = -ENOMEM;
		goto rx;
	}
	rxr->rx_prod = prod;
	/*
	 * All MBUFs are allocated with the same size under DPDK,
	 * no optimization for rx_copy_thresh
	 */
rx:
	*rx_pkt = mbuf;

next_rx:

	*raw_cons = tmp_raw_cons;

	return rc;
}

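/*
 * Burst receive: drain up to nb_pkts completions from the completion
 * ring, dispatching non-RX completions to the HWRM event handler, then
 * ring the RX/AGG doorbells once for everything that was refilled.
 */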
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	uint16_t prod = rxr->rx_prod;
	uint16_t ag_prod = rxr->ag_prod;
	int rc = 0;
	bool evt = false;

	/* If Rx Q was stopped return. RxQ0 cannot be stopped. */
	if (unlikely(((rxq->rx_deferred_start ||
		       !rte_spinlock_trylock(&rxq->lock)) &&
		      rxq->queue_id)))
		return 0;

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
		rte_prefetch0(&cpr->cp_desc_ring[cons]);
		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;
		cpr->valid = FLIP_VALID(cons,
					cpr->cp_ring_struct->ring_mask,
					cpr->valid);

		/* TODO: Avoid magic numbers... */
		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
			if (likely(!rc) || rc == -ENOMEM)
				nb_rx_pkts++;
			if (rc == -EBUSY)	/* partial completion */
				break;
		} else {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
		}

		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || evt)
			break;
		/* Post some Rx buf early in case of larger burst processing */
		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
			B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
	}

	cpr->cp_raw_cons = raw_cons;
	if (!nb_rx_pkts && !evt) {
		/*
		 * For PMD, there is no need to keep on pushing to REARM
		 * the doorbell if there are no new completions
		 */
		goto done;
	}

	if (prod != rxr->rx_prod)
		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

	/* Ring the AGG ring DB */
	if (ag_prod != rxr->ag_prod)
		B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);

	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
	if (rc == -ENOMEM) {
		int i;

		for (i = prod; i <= nb_rx_pkts;
			i = RING_NEXT(rxr->rx_ring_struct, i)) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];

			/* Buffer already allocated for this index. */
			if (rx_buf->mbuf != NULL)
				continue;

			/* This slot is empty. Alloc buffer for Rx */
			if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
				rxr->rx_prod = i;
				B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
			} else {
				PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
				break;
			}
		}
	}

done:
	rte_spinlock_unlock(&rxq->lock);

	return nb_rx_pkts;
}

void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;
	struct bnxt_rx_queue *rxq;

	if (!bp->rx_queues)
		return;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		if (!rxq)
			continue;

		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
		rte_free(rxq->rx_ring->rx_ring_struct);

		/* Free the Aggregator ring */
		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
		rte_free(rxq->rx_ring->ag_ring_struct);
		rxq->rx_ring->ag_ring_struct = NULL;

		rte_free(rxq->rx_ring);

		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring->cp_ring_struct);
		rte_free(rxq->cp_ring);

		rte_free(rxq);
		bp->rx_queues[i] = NULL;
	}
}

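/*
 * Allocate the software state for one RX queue: the RX ring, the
 * completion ring (sized to hold RX plus aggregation completions,
 * hence the 2 + AGG_RING_SIZE_FACTOR multiplier), and the AGG ring.
 */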
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;

	rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
			       (2 * VLAN_TAG_SIZE);
	rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);

	rxr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_rx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxr == NULL)
		return -ENOMEM;
	rxq->rx_ring = rxr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->rx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->rx_desc_ring;
	ring->bd_dma = rxr->rx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->rx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_rx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	rxq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
					  (2 + AGG_RING_SIZE_FACTOR));
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	/* Allocate Aggregator rings */
	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	rxr->ag_ring_struct = ring;
	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
					  AGG_RING_SIZE_FACTOR);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)rxr->ag_desc_ring;
	ring->bd_dma = rxr->ag_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
	ring->vmem = (void **)&rxr->ag_buf_ring;

	return 0;
}

static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
			    uint16_t len)
{
	uint32_t j;
	struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

	if (!rx_bd_ring)
		return;
	for (j = 0; j < ring->ring_size; j++) {
		rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
		rx_bd_ring[j].len = rte_cpu_to_le_16(len);
		rx_bd_ring[j].opaque = j;
	}
}

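/*
 * Populate one RX queue for use: write the buffer-descriptor templates,
 * fill the RX and AGG rings with mbufs, and pre-allocate one mbuf per
 * TPA aggregation slot.
 */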
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring *ring;
	uint32_t prod, type;
	unsigned int i;
	uint16_t size;

	size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
	if (rxq->rx_buf_use_size <= size)
		size = rxq->rx_buf_use_size;

	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

	rxr = rxq->rx_ring;
	ring = rxr->rx_ring_struct;
	bnxt_init_rxbds(ring, type, size);

	prod = rxr->rx_prod;
	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
			PMD_DRV_LOG(WARNING,
				"init'ed rx ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->rx_prod = prod;
		prod = RING_NEXT(rxr->rx_ring_struct, prod);
	}

	ring = rxr->ag_ring_struct;
	type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	bnxt_init_rxbds(ring, type, size);
	prod = rxr->ag_prod;

	for (i = 0; i < ring->ring_size; i++) {
		if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
			PMD_DRV_LOG(WARNING,
				"init'ed AG ring %d with %d/%d mbufs only\n",
				rxq->queue_id, i, ring->ring_size);
			break;
		}
		rxr->ag_prod = prod;
		prod = RING_NEXT(rxr->ag_ring_struct, prod);
	}
	PMD_DRV_LOG(DEBUG, "AGG Done!\n");

	if (rxr->tpa_info) {
		for (i = 0; i < BNXT_TPA_MAX; i++) {
			rxr->tpa_info[i].mbuf =
				__bnxt_alloc_rx_data(rxq->mb_pool);
			if (!rxr->tpa_info[i].mbuf) {
				rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
				return -ENOMEM;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

	return 0;
}