/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2016 Intel Corporation
 */

#include <inttypes.h>

#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_net.h>
#include "fm10k.h"
#include "base/fm10k_type.h"

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif

#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|     GLORT      | PKT HDR & TYPE |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.glort,
			rxd->d.data);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|   VLAN & LEN   |     STATUS     |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", rxd->d.vlan_len,
			rxd->d.staterr);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|    RESERVED    |    RSS_HASH    |");
	PMD_RX_LOG(DEBUG, "|   0x%08x   |   0x%08x   |", 0, rxd->d.rss);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
	PMD_RX_LOG(DEBUG, "|            TIME TAG             |");
	PMD_RX_LOG(DEBUG, "|       0x%016"PRIx64"        |", rxd->q.timestamp);
	PMD_RX_LOG(DEBUG, "+----------------|----------------+");
}
#endif /* RTE_LIBRTE_FM10K_DEBUG_RX */

#define FM10K_TX_OFFLOAD_MASK (  \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_IPV6 |            \
		PKT_TX_IPV4 |            \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_TCP_SEG)

#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
		(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
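
/* Any request bit outside FM10K_TX_OFFLOAD_MASK (for instance
 * PKT_TX_OUTER_IP_CKSUM, which fm10k cannot perform) survives the XOR
 * above and makes fm10k_prep_pkts() reject the mbuf.
 */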

/* @note: When this function is changed, make corresponding change to
 * fm10k_dev_supported_ptypes_get()
 */
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
	static const uint32_t
		ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
			__rte_cache_aligned = {
		[FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
		[FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT,
		[FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
		[FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
		[FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
		[FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	};

	m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
						>> FM10K_RXD_PKTTYPE_SHIFT];

	if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
		m->ol_flags |= PKT_RX_RSS_HASH;
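
	/* IPCS/L4CS mean the hardware ran the checksum; IPE/L4E mean it
	 * failed.  Only the "checked and bad" combination marks the mbuf
	 * bad; every other combination is reported as good.
	 */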
	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	if (unlikely((d->d.staterr &
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
}

uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
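	/* Note: clamping the burst to alloc_thresh keeps the refill
	 * bookkeeping below simple: at most one refill trigger can be
	 * crossed per call.
	 */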
	for (count = 0; count < nb_pkts; ++count) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;
		/**
		 * Packets in fm10k device always carry at least one VLAN tag.
		 * For those packets coming in without VLAN tag,
		 * the port default VLAN tag will be used.
		 * So, always PKT_RX_VLAN flag is set and vlan_tci
		 * is valid for each RX packet's mbuf.
		 */
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
		 * so it can be selected to store sglort value.
		 */
		if (q->rx_ftag_en)
			mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;
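
	/* Replenish when the consumer index has moved past the last refill
	 * trigger, or when it has just wrapped around the ring (alloc == 1);
	 * the plain '>' comparison alone cannot see the wrap case.
	 */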
	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}
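
/* Same receive path as fm10k_recv_pkts(), except that one packet may span
 * several descriptors; segments are linked into a single mbuf chain and
 * only completed chains (EOP seen) are returned to the caller.
 */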
uint16_t
fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	uint16_t nb_rcv, nb_seg;
	int alloc = 0;
	uint16_t next_dd;
	struct rte_mbuf *first_seg = q->pkt_first_seg;
	struct rte_mbuf *last_seg = q->pkt_last_seg;
	int ret;

	next_dd = q->next_dd;
	nb_rcv = 0;

	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_seg; count++) {
		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif

		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}

		/* Fill data length */
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = mbuf;
			first_seg->pkt_len = desc.w.length;
		} else {
			first_seg->pkt_len =
					(uint16_t)(first_seg->pkt_len +
					rte_pktmbuf_data_len(mbuf));
			first_seg->nb_segs++;
			last_seg->next = mbuf;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
			last_seg = mbuf;
			continue;
		}

		first_seg->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(first_seg, &desc);
#endif
		first_seg->hash.rss = desc.d.rss;
		/**
		 * Packets in fm10k device always carry at least one VLAN tag.
		 * For those packets coming in without VLAN tag,
		 * the port default VLAN tag will be used.
		 * So, always PKT_RX_VLAN flag is set and vlan_tci
		 * is valid for each RX packet's mbuf.
		 */
		first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		first_seg->vlan_tci = desc.w.vlan;
		/**
		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
		 * so it can be selected to store sglort value.
		 */
		if (q->rx_ftag_en)
			first_seg->vlan_tci_outer =
				rte_le_to_cpu_16(desc.w.sglort);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch((char *)first_seg->buf_addr +
				first_seg->data_off);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rcv++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint16_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	q->pkt_first_seg = first_seg;
	q->pkt_last_seg = last_seg;

	return nb_rcv;
}
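
/* Report whether the RX descriptor 'offset' entries past the queue's
 * current read position has been written back by hardware (DD bit set).
 */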
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return 0;
	}

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
			rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));

	return ret;
}

int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	volatile union fm10k_rx_desc *rxdp;
	struct fm10k_rx_queue *rxq = rx_queue;
	uint16_t nb_hold, trigger_last;
	uint16_t desc;
	int ret;

	if (unlikely(offset >= rxq->nb_desc)) {
		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
		return -EINVAL;
	}
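
	/* Descriptors between the last tail write (trigger_last) and the
	 * current read position (next_dd) have been consumed by software
	 * but not yet handed back to hardware, so offsets that reach into
	 * that span are reported as unavailable below.
	 */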
	if (rxq->next_trigger < rxq->alloc_thresh)
		trigger_last = rxq->next_trigger +
					rxq->nb_desc - rxq->alloc_thresh;
	else
		trigger_last = rxq->next_trigger - rxq->alloc_thresh;

	if (rxq->next_dd < trigger_last)
		nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
	else
		nb_hold = rxq->next_dd - trigger_last;

	if (offset >= rxq->nb_desc - nb_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->next_dd + offset;
	if (desc >= rxq->nb_desc)
		desc -= rxq->nb_desc;

	rxdp = &rxq->hw_ring[desc];

	ret = !!(rxdp->w.status &
			rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
	if (ret)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	volatile struct fm10k_tx_desc *txdp;
	struct fm10k_tx_queue *txq = tx_queue;
	uint16_t desc;
	uint16_t next_rs = txq->nb_desc;
	struct fifo rs_tracker = txq->rs_tracker;
	struct fifo *r = &rs_tracker;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	desc = txq->next_free + offset;
	/* go to next desc that has the RS bit */
	desc = (desc / txq->rs_thresh + 1) *
		txq->rs_thresh - 1;

	if (desc >= txq->nb_desc) {
		desc -= txq->nb_desc;
		if (desc >= txq->nb_desc)
			desc -= txq->nb_desc;
	}
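
	/* Completion state is only visible at RS-marked descriptors, so
	 * scan the RS tracker for the nearest marked descriptor at or
	 * beyond 'desc'.
	 */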
	for ( ; r->head != r->endp; ) {
		if (*r->head >= desc && *r->head < next_rs)
			next_rs = *r->head;
		++r->head;
	}

	txdp = &txq->hw_ring[next_rs];
	if (txdp->flags & FM10K_TXD_FLAG_DONE)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

/*
 * Free multiple TX mbuf at a time if they are in the same pool
 *
 * @txep: software desc ring index that starts to free
 * @num: number of descs to free
 *
 */
static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
{
	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
	int i;
	int nb_free = 0;

	if (unlikely(num == 0))
		return;
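
	/* rte_pktmbuf_prefree_seg() returns the mbuf when the caller held
	 * the last reference (so it may be freed), or NULL otherwise; only
	 * returned mbufs are batched into the bulk put below.
	 */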
	m = rte_pktmbuf_prefree_seg(txep[0]);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < num; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i]);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}
}

static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
{
	uint16_t next_rs, count = 0;

	next_rs = fifo_peek(&q->rs_tracker);
	if (!(q->hw_ring[next_rs].flags & FM10K_TXD_FLAG_DONE))
		return;

	/* the DONE flag is set on this descriptor so remove the ID
	 * from the RS bit tracker and free the buffers */
	fifo_remove(&q->rs_tracker);

	/* wrap around? if so, free buffers from last_free up to but NOT
	 * including nb_desc */
	if (q->last_free > next_rs) {
		count = q->nb_desc - q->last_free;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free = 0;
	}

	/* adjust free descriptor count before the next loop */
	q->nb_free += count + (next_rs + 1 - q->last_free);

	/* free buffers from last_free, up to and including next_rs */
	if (q->last_free <= next_rs) {
		count = next_rs - q->last_free + 1;
		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
		q->last_free += count;
	}

	if (q->last_free == q->nb_desc)
		q->last_free = 0;
}
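
/* Fill TX descriptors for one (possibly multi-segment) packet.  The tail
 * register is not written here; fm10k_xmit_pkts() updates it once per burst.
 */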
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags, hdrlen;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->nb_free -= mb->nb_segs;

	q->hw_ring[q->next_free].flags = 0;
	if (q->tx_ftag_en)
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	q->sw_ring[q->next_free] = mb;
	q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
	q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
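
	/* For TSO, program the MSS and total header length into the first
	 * descriptor.  Requests outside the device limits simply leave
	 * these fields unprogrammed rather than failing the transmit.
	 */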
	if (mb->ol_flags & PKT_TX_TCP_SEG) {
		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
			mb->l3_len + mb->l4_len;
		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
			hdrlen += sizeof(struct fm10k_ftag);

		if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
				(hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
				(mb->tso_segsz >= FM10K_TSO_MINMSS))) {
			q->hw_ring[q->next_free].mss = mb->tso_segsz;
			q->hw_ring[q->next_free].hdrlen = hdrlen;
		}
	}

	if (++q->next_free == q->nb_desc)
		q->next_free = 0;

	/* fill up the rings */
	for (mb = mb->next; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		q->hw_ring[q->next_free].flags = 0;
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}

	q->hw_ring[last_id].flags |= flags;
}

uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct fm10k_tx_queue *q = tx_queue;
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* running low on descriptors? try to free some... */
		if (q->nb_free < q->free_thresh)
			tx_free_descriptors(q);

		/* make sure there are enough free descriptors to transmit the
		 * entire packet before doing anything */
		if (q->nb_free < mb->nb_segs)
			break;

		/* sanity check to make sure the mbuf is valid */
		if ((mb->nb_segs == 0) ||
		    ((mb->nb_segs > 1) && (mb->next == NULL)))
			break;

		/* process the packet */
		tx_xmit_pkt(q, mb);
	}

	/* update the tail pointer if any packets were processed */
	if (likely(count > 0))
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_free);

	return count;
}
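
/* TX prepare callback (used via rte_eth_tx_prepare()): validate offload
 * requests and fix up checksum fields before a burst is handed to
 * fm10k_xmit_pkts().  Returns the number of leading packets that passed;
 * rte_errno reports why the first failing packet was rejected.
 */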
uint16_t
fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	int i, ret;
	struct rte_mbuf *m;

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		if ((m->ol_flags & PKT_TX_TCP_SEG) &&
				(m->tso_segsz < FM10K_TSO_MINMSS)) {
			rte_errno = -EINVAL;
			return i;
		}

		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = ret;
			return i;
		}
	}

	return nb_pkts;
}