/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

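/*
 * Reserve a DMA-capable memzone for a per-queue hardware ring: a thin
 * wrapper around rte_eth_dma_zone_reserve() that fixes the alignment at
 * BNX2X_PAGE_SIZE to match the page-linked layout of the ring chains.
 */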
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
					ring_size, BNX2X_PAGE_SIZE, socket_id);
}

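/*
 * Free an RX queue: release every mbuf still held in the software ring,
 * then the ring itself, then the queue structure. Safe to call with NULL.
 */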
static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != rx_queue) {

		sw_ring = rx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < rx_queue->nb_rx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void
bnx2x_dev_rx_queue_release(void *rxq)
{
	bnx2x_rx_queue_release(rxq);
}

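/*
 * ethdev rx_queue_setup hook. Sizes the BD ring as a power-of-two number
 * of BNX2X_PAGE_SIZE pages large enough for nb_desc, allocates the
 * hardware BD ring, completion queue and software mbuf ring, pre-fills
 * every usable BD with a fresh mbuf, and links the pages into circular
 * chains.
 */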
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 __rte_unused const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	rte_iova_t *rx_bd;
	rte_iova_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;

	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
		    "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		    queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
		    (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
		    rxq->nb_cq_pages);

	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/* Link the RX chain pages. */
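	/*
	 * The last two BDs of each page are reserved for the next-page
	 * pointer; (j % nb_rx_pages) wraps the final page back onto the
	 * first, closing the chain into a ring.
	 */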
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}

	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = rte_mbuf_raw_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}

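/*
 * Free a TX queue: release any mbufs still held in the software ring,
 * then the ring itself, then the queue structure. Safe to call with NULL.
 */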
static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (NULL != tx_queue) {

		sw_ring = tx_queue->sw_ring;
		if (NULL != sw_ring) {
			for (i = 0; i < tx_queue->nb_tx_desc; i++) {
				if (NULL != sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void
bnx2x_dev_tx_queue_release(void *txq)
{
	bnx2x_tx_queue_release(txq);
}

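/*
 * TX burst handler. Reclaims completed descriptors when the queue runs
 * low, encapsulates up to nb_pkts mbufs into BD chains via
 * bnx2x_tx_encap(), and rings the doorbell once for the whole burst.
 */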
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_tx_queue *txq;
	struct bnx2x_softc *sc;
	struct bnx2x_fastpath *fp;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;
	uint32_t ret;

	txq = p_txq;
	sc = txq->sc;
	fp = &sc->fp[txq->queue_id];

	if (unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
		     txq->tx_free_thresh))
		bnx2x_txeof(sc, fp);

	nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
	if (unlikely(nb_tx_pkts == 0))
		return 0;

	while (nb_tx_pkts--) {
		struct rte_mbuf *m = *tx_pkts++;
		assert(m != NULL);
		ret = bnx2x_tx_encap(txq, m);
		fp->tx_db.data.prod += ret;
		nb_pkt_sent++;
	}

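	/*
	 * Refresh the fastpath status-block index and publish the burst:
	 * the barriers order the BD and producer writes against the
	 * doorbell write that hands the chain to the hardware.
	 */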
	bnx2x_update_fp_sb_idx(fp);
	mb();
	DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
	mb();

	if ((txq->nb_tx_desc - txq->nb_tx_avail) >
	    txq->tx_free_thresh)
		bnx2x_txeof(sc, fp);

	return nb_pkt_sent;
}

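/*
 * ethdev tx_queue_setup hook, mirroring the RX path: sizes the BD ring in
 * BNX2X_PAGE_SIZE pages, clamps tx_free_thresh so at least one packet's
 * worth of BDs stays free, allocates the hardware and software rings, and
 * links the pages into a circular chain.
 */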
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;

	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
	txq->tx_free_thresh = min(txq->tx_free_thresh,
				  txq->nb_tx_desc - BDS_PER_TX_PKT);

	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		    "total_bd=%lu, tx_pages=%u",
		    queue_idx, nb_desc, txq->tx_free_thresh,
		    (unsigned long)USABLE_TX_BD(txq),
		    (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */

	/* Link TX pages */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
		 * (TOTAL_TX_BD_PER_PAGE * i - 1));
		 */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}

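/*
 * Publish new RX BD and CQE producer values. Both 16-bit producers share
 * the first 32-bit word of union ustorm_eth_rx_producers, so one register
 * write updates them together.
 */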
static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		       uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
	union ustorm_eth_rx_producers rx_prods;

	rx_prods.prod.bd_prod = rx_bd_prod;
	rx_prods.prod.cqe_prod = rx_cq_prod;

	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}

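/*
 * RX burst handler. Walks the completion queue from the software consumer
 * to the hardware consumer, handing each received mbuf to the caller and
 * replacing it in the BD ring with a freshly allocated one so the ring
 * stays full, then publishes the updated producers to the device.
 */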
static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnx2x_rx_queue *rxq = p_rxq;
	struct bnx2x_softc *sc = rxq->sc;
	struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
	uint32_t nb_rx = 0;
	uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
	uint16_t bd_cons, bd_prod;
	struct rte_mbuf *new_mb;
	uint16_t rx_pref;
	struct eth_fast_path_rx_cqe *cqe_fp;
	uint16_t len, pad;
	struct rte_mbuf *rx_mb = NULL;

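	/*
	 * The last CQE of each completion-queue page is a next-page
	 * pointer, not a real completion, so step the hardware consumer
	 * index over page boundaries.
	 */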
	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE) {
		++hw_cq_cons;
	}

	bd_cons = rxq->rx_bd_head;
	bd_prod = rxq->rx_bd_tail;
	sw_cq_cons = rxq->rx_cq_head;
	sw_cq_prod = rxq->rx_cq_tail;

	if (sw_cq_cons == hw_cq_cons)
		return 0;

	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {

		bd_prod &= MAX_RX_BD(rxq);
		bd_cons &= MAX_RX_BD(rxq);

		cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;

		if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) {
			PMD_RX_LOG(ERR, "slowpath event during traffic processing");
			break;
		}

		if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
			PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
				   cqe_fp->type_error_flags, sw_cq_cons);
			goto next_rx;
		}

		len = cqe_fp->pkt_len_or_gro_seg_len;
		pad = cqe_fp->placement_offset;

		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!new_mb)) {
			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
			rte_eth_devices[rxq->port_id].data->
				rx_mbuf_alloc_failed++;
			goto next_rx;
		}

		rx_mb = rxq->sw_ring[bd_cons];
		rxq->sw_ring[bd_cons] = new_mb;
		rxq->rx_ring[bd_prod] =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));

		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
		rte_prefetch0(rxq->sw_ring[rx_pref]);
		if ((rx_pref & 0x3) == 0) {
			rte_prefetch0(&rxq->rx_ring[rx_pref]);
			rte_prefetch0(&rxq->sw_ring[rx_pref]);
		}

		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
		rx_mb->nb_segs = 1;
		rx_mb->next = NULL;
		rx_mb->pkt_len = rx_mb->data_len = len;
		rx_mb->port = rxq->port_id;
		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 */
		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
			rx_mb->vlan_tci = cqe_fp->vlan_tag;
			rx_mb->ol_flags |= PKT_RX_VLAN;
		}

		rx_pkts[nb_rx] = rx_mb;
		nb_rx++;

		/* limit spinning on the queue */
		if (unlikely(nb_rx == sc->rx_budget)) {
			PMD_RX_LOG(ERR, "Limit spinning on the queue");
			break;
		}

next_rx:
		bd_cons = NEXT_RX_BD(bd_cons);
		bd_prod = NEXT_RX_BD(bd_prod);
		sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
		sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
	}
	rxq->rx_bd_head = bd_cons;
	rxq->rx_bd_tail = bd_prod;
	rxq->rx_cq_head = sw_cq_cons;
	rxq->rx_cq_tail = sw_cq_prod;

	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);

	return nb_rx;
}

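/*
 * No-op burst handler: bnx2x_dev_rxtx_init_dummy() installs it on both the
 * RX and TX paths so that polling the queues is harmless while the real
 * handlers are detached.
 */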
static uint16_t
bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
		      __rte_unused struct rte_mbuf **rx_pkts,
		      __rte_unused uint16_t nb_pkts)
{
	return 0;
}

void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
	dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
}

void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
{
	dev->rx_pkt_burst = bnx2x_recv_pkts;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
}

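/*
 * Release every configured RX and TX queue and clear the ethdev queue
 * pointers so the port can be reconfigured from a clean slate.
 */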
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint8_t i;

	PMD_INIT_FUNC_TRACE(sc);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			bnx2x_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			bnx2x_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}
}