/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"

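/*
 * Optional payload prefetch: when RTE_PMD_PACKET_PREFETCH is enabled at
 * build time, packet data is prefetched into cache just before a burst is
 * handed back to the application; otherwise this compiles to a no-op.
 */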
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif

/* @note: When this function is changed, make the corresponding change to
 * fm10k_dev_supported_ptypes_get().
 */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	static const uint32_t
		ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
			__rte_cache_aligned = {
		[FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
		[FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT,
		[FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	};

	m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
						>> FM10K_RXD_PKTTYPE_SHIFT];

	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= PKT_RX_RSS_HASH;

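	/*
	 * IPCS/L4CS report that the hardware actually validated the
	 * checksum, while IPE/L4E report a detected error; a checksum is
	 * therefore known bad only when both bits of a pair are set.
	 */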
	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
}

uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

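	/*
	 * Cap the burst at the replenish threshold so that at most one
	 * bulk mbuf allocation is needed to refill the ring below.
	 */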
	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;
		/**
		 * Packets received by the fm10k device always carry at least
		 * one VLAN tag; packets arriving without a tag are assigned
		 * the port's default VLAN. PKT_RX_VLAN_PKT is therefore
		 * always set, and vlan_tci is valid for every RX mbuf.
		 */
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
		 * is repurposed here to store the sglort value.
		 */
		if (q->rx_ftag_en)
			mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;

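	/*
	 * Replenish the ring once the read index has passed the trigger
	 * point, or when it has just wrapped: allocate a threshold's worth
	 * of fresh mbufs in one bulk call and hand them to hardware.
	 */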
	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

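		/*
		 * Refill every ring slot from next_alloc up to and including
		 * the trigger index with the newly allocated mbufs.
		 */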
		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}

uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	int alloc = 0;
	uint16_t next_dd;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;
	int ret;

	next_dd = q->next_dd;
	nb_rcv = 0;

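	/*
	 * A scattered packet may span poll calls: a partially assembled
	 * chain is carried across calls in q->pkt_first_seg/pkt_last_seg
	 * (loaded above, stored back before returning) and resumed here.
	 */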
	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		/* Fill data length */
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of
		 * segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
					(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif
		first_seg->hash.rss = desc.d.rss;
		/**
		 * Packets received by the fm10k device always carry at least
		 * one VLAN tag; packets arriving without a tag are assigned
		 * the port's default VLAN. PKT_RX_VLAN_PKT is therefore
		 * always set, and vlan_tci is valid for every RX mbuf.
		 */
		first_seg->ol_flags |= PKT_RX_VLAN_PKT;
		first_seg->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is unused by the fm10k driver, so it
		 * is repurposed here to store the sglort value.
		 */
		if (q->rx_ftag_en)
			first_seg->vlan_tci_outer =
					rte_le_to_cpu_16(desc.w.sglort);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
				first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rcv++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}

int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return 0;
	}

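	/*
	 * Map the caller-supplied offset onto the ring relative to the
	 * current read position, wrapping at nb_desc, and report whether
	 * hardware has written that descriptor back (DD bit set).
	 */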
	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
		rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

	return ret;
}

/*
 * Free multiple TX mbufs at a time if they are in the same mempool.
 *
 * @txep: pointer into the software ring at the first entry to free
 * @num: number of descriptor entries to free
 */
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
	int i;
	int nb_free = 0;

	if (unlikely(num == 0))
		return;

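	/*
	 * __rte_pktmbuf_prefree_seg() returns the mbuf when its reference
	 * count drops to zero (so it may go back to its pool) and NULL
	 * otherwise. Consecutive mbufs from the same pool are batched into
	 * a single rte_mempool_put_bulk() call.
	 */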
	m = __rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < num; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
			txep[i] = NULL;
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < num; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
			txep[i] = NULL;
		}
	}
}

static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

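	/*
	 * q->rs_tracker records the ring positions of descriptors sent with
	 * the RS (report status) bit; hardware marks such a descriptor DONE
	 * once it and everything before it has been transmitted.
	 */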
	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	if (q->last_free <= next_rs) {
		count = next_rs - q->last_free + 1;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free += count;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}

static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags, hdrlen;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->nb_free -= mb->nb_segs;

	q->hw_ring[q->next_free].flags = 0;
	if (q->tx_ftag_en)
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	q->sw_ring[q->next_free] = mb;
	q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
	q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));

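	/*
	 * For TSO, program the MSS and total header length only when they
	 * fall within the ranges the hardware supports; otherwise the
	 * packet is sent without segmentation offload.
	 */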
	if (mb->ol_flags & PKT_TX_TCP_SEG) {
		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
			mb->l3_len + mb->l4_len;
		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
			hdrlen += sizeof(struct fm10k_ftag);

		if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
				(hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
				(mb->tso_segsz >= FM10K_TSO_MINMSS))) {
			q->hw_ring[q->next_free].mss = mb->tso_segsz;
			q->hw_ring[q->next_free].hdrlen = hdrlen;
		}
	}

	if (++q->next_free == q->nb_desc)
		q->next_free = 0;

	/* chain the remaining segments into consecutive descriptors */
	for (mb = mb->next; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		q->hw_ring[q->next_free].flags = 0;
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}

	q->hw_ring[last_id].flags |= flags;
}

uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* running low on descriptors? try to free some... */
		if (q->nb_free < q->free_thresh)
			tx_free_descriptors(q);

		/* make sure there are enough free descriptors to transmit the
		 * entire packet before doing anything */
		if (q->nb_free < mb->nb_segs)
			break;

		/* sanity check to make sure the mbuf is valid */
		if ((mb->nb_segs == 0) ||
				((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* update the tail pointer if any packets were processed */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}