/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq = rxvq->vq;

	return VIRTQUEUE_NUSED(vq) >= offset;
}
void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}
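
/*
 * Return a (possibly chained) descriptor to the free list.  The chain is
 * walked via VRING_DESC_F_NEXT to find its last descriptor, and the newly
 * freed chain is then linked behind whatever free chain already exists.
 */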
void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}
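
/*
 * Dequeue up to 'num' received mbufs from the used ring.  Each used element
 * carries the head descriptor index of the buffer the device filled; that
 * index is used both to look up the mbuf cookie and to return the
 * descriptor chain to the free list.
 */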
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}
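
/*
 * In-order variant of the RX dequeue: because the device consumes
 * descriptors in ring order, the used-ring index and the descriptor index
 * coincide, and the whole consumed range can be released with a single
 * vq_ring_free_inorder() call at the end.
 */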
static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
			uint32_t *len,
			uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;
	uint16_t i;

	if (unlikely(num == 0))
		return 0;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_ring.used->ring[used_idx];
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;
	}

	vq_ring_free_inorder(vq, used_idx, i);
	return i;
}
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
/* Cleanup from completed transmits. */
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx;

	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;
		struct vq_desc_extra *dxp;

		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];

		desc_idx = (uint16_t)uep->id;
		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}
}
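
/*
 * For the in-order path only the last completed chain matters: every
 * descriptor up to (and including) the chain ending at last_idx can be
 * returned to the free list with one vq_ring_free_inorder() call.
 */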
/* Cleanup from completed in-order transmits. */
static void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx = 0, last_idx;
	int16_t free_cnt = 0;
	struct vq_desc_extra *dxp = NULL;

	if (unlikely(num == 0))
		return;

	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;

		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;

		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	last_idx = desc_idx + dxp->ndescs - 1;
	free_cnt = last_idx - vq->vq_desc_tail_idx;
	if (free_cnt <= 0)
		free_cnt += vq->vq_nentries;

	vq_ring_free_inorder(vq, last_idx, free_cnt);
}
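
/*
 * Refill 'num' RX descriptors with fresh mbufs, in ring order.  Each buffer
 * address is rewound by the virtio-net header size so the device can write
 * the header into the mbuf headroom, directly in front of the packet data.
 */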
static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_ring.desc;

	while (i < num) {
		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		start_dp[idx].addr =
				VIRTIO_MBUF_ADDR(cookies[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len =
				cookies[i]->buf_len -
				RTE_PKTMBUF_HEADROOM +
				hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);
		head_idx++;
		i++;
	}

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t needed = 1;
	uint16_t head_idx, idx;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < needed))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx;
	if (unlikely(head_idx >= vq->vq_nentries))
		return -EFAULT;

	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;
	start_dp[idx].addr =
		VIRTIO_MBUF_ADDR(cookie, vq) +
		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
	start_dp[idx].len =
		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
	start_dp[idx].flags = VRING_DESC_F_WRITE;
	idx = start_dp[idx].next;
	vq->vq_desc_head_idx = idx;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq_update_avail_ring(vq, head_idx);

	return 0;
}
/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct ipv4_hdr *iph;
		struct ipv6_hdr *ip6h;
		struct tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* calculate the new phdr checksum not including ip_paylen */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}
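
/*
 * Note on the checksum arithmetic above: the pseudo-header checksum is
 * adjusted by adding the big-endian IP payload length and folding the carry
 * back into 16 bits, i.e. one step of one's-complement addition.  One fold
 * is enough when adding a single 16-bit value: e.g. 0xffff + 0x0002 gives
 * 0x10001, which folds to 0x0001 + 0x0001 = 0x0002.
 */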
/* avoid the write operation when it is unnecessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
			struct rte_mbuf *cookie,
			bool offload)
{
	if (offload) {
		if (cookie->ol_flags & PKT_TX_TCP_SEG)
			cookie->ol_flags |= PKT_TX_TCP_CKSUM;

		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct udp_hdr,
				dgram_cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		case PKT_TX_TCP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		default:
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			break;
		}

		/* TCP Segmentation Offload */
		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
			virtio_tso_fix_cksum(cookie);
			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
				VIRTIO_NET_HDR_GSO_TCPV6 :
				VIRTIO_NET_HDR_GSO_TCPV4;
			hdr->gso_size = cookie->tso_segsz;
			hdr->hdr_len =
				cookie->l2_len +
				cookie->l3_len +
				cookie->l4_len;
		} else {
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	}
}
static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	uint16_t idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t i = 0;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_ring.desc;

	while (i < num) {
		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookies[i], head_size);
		cookies[i]->pkt_len -= head_size;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}

		virtqueue_xmit_offload(hdr, cookies[i],
				vq->hw->has_tx_offload);

		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
		start_dp[idx].len   = cookies[i]->data_len;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

		idx++;
		i++;
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
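
/*
 * General TX enqueue.  The virtio-net header is placed in one of three ways:
 *  - can_push:     prepended directly into the mbuf headroom, so only one
 *                  ring slot per segment is used;
 *  - use_indirect: a single ring slot points at an indirect descriptor
 *                  table in the reserved region, whose first entry already
 *                  points at the header;
 *  - otherwise:    the first ring slot points at the header copy kept in
 *                  the reserved region and the chain continues from there.
 */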
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
			int in_order)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookie, head_size);
		/* rte_pktmbuf_prepend() counts the hdr size in the pkt
		 * length, which is wrong; the subtraction below restores
		 * the correct pkt size.
		 */
		cookie->pkt_len -= head_size;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	do {
		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len   = cookie->data_len;
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (!in_order) {
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = idx;
	}
}
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq && hw->cvq->vq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
	}
}
int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf __rte_unused,
			struct rte_mempool *mp)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq = &vq->rxq;
	rxvq->queue_id = queue_idx;
	rxvq->mpool = mp;
	if (rxvq->mpool == NULL) {
		rte_exit(EXIT_FAILURE,
			"Cannot allocate mbufs for rx virtqueue");
	}

	dev->data->rx_queues[queue_idx] = rxvq;

	return 0;
}
int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	struct rte_mbuf *m;
	uint16_t desc_idx;
	int error, nbufs, i;

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each rx descriptor */
	nbufs = 0;

	if (hw->use_simple_rx) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
		     desc_idx++) {
			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_ring.desc[desc_idx].flags =
				VRING_DESC_F_WRITE;
		}

		virtio_rxq_vec_setup(rxvq);
	}

	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
	     desc_idx++) {
		vq->sw_ring[vq->vq_nentries + desc_idx] =
			&rxvq->fake_mbuf;
	}

	if (hw->use_simple_rx) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		}
	} else if (hw->use_inorder_rx) {
		if ((!virtqueue_full(vq))) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				free_cnt)) {
				error = virtqueue_enqueue_refill_inorder(vq,
						pkts,
						free_cnt);
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);
				}
			}

			nbufs += free_cnt;
			vq_update_avail_idx(vq);
		}
	} else {
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			error = virtqueue_enqueue_recv_refill(vq, m);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		vq_update_avail_idx(vq);
	}

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

	VIRTQUEUE_DUMP(vq);

	return 0;
}
/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq = &vq->txq;
	txvq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;

	return 0;
}
int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];

	PMD_INIT_FUNC_TRACE();

	if (hw->use_inorder_tx)
		vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;

	VIRTQUEUE_DUMP(vq);

	return 0;
}
static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = virtqueue_enqueue_recv_refill(vq, m);

	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}
static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}
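
/*
 * Packet size histogram: bin 0 is <64 bytes, bin 1 is exactly 64, bins 2-5
 * cover 65-1023 bytes with one bin per power-of-two range
 * (bin = 32 - clz(size) - 5, so e.g. a 128-byte packet lands in bin 3),
 * bin 6 is 1024-1518 bytes and bin 7 everything larger.
 */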
void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct ether_addr *ea;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}
static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
	VIRTIO_DUMP_PACKET(m, m->data_len);

	rxvq->stats.bytes += m->pkt_len;
	virtio_update_packet_stats(&rxvq->stats, m);
}
/* Optionally fill offload information in structure */
static int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum);
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {
			return -EINVAL;
		}

		/* Update mss length in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->ol_flags |= PKT_RX_LRO |
				PKT_RX_L4_CKSUM_NONE;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
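
/*
 * DESC_PER_CACHELINE is used to trim a burst so that the used-ring consumer
 * index ends on a cache-line boundary, which keeps the next burst from
 * re-touching a partially consumed cache line.
 */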
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}
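
/*
 * Mergeable-buffer RX, in-order ring: the first buffer of each packet
 * carries a virtio_net_hdr_mrg_rxbuf whose num_buffers field tells how many
 * descriptors the device spread the packet across; the extra buffers are
 * chained onto the head mbuf below.
 */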
uint16_t
virtio_recv_mergeable_pkts_inorder(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued;
	uint16_t seg_num;
	uint16_t seg_res;
	uint32_t hdr_size;
	uint16_t i;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);
	nb_used = RTE_MIN(nb_used, nb_pkts);
	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

	virtio_rmb();

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	nb_enqueued = 0;
	seg_num = 1;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (vq->hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
			rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);

			if (prev)
				prev->next = rxm;

			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need its remaining segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		prev = rcv_pkts[nb_rx];
		if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				rx_pkts[nb_rx]->data_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
					"Not enough segments for packet.");
			virtio_discard_rxbuf_inorder(vq, prev);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}
uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *prev;
	int error;
	uint32_t i, nb_enqueued;
	uint16_t seg_num;
	uint16_t extra_idx;
	uint16_t seg_res;
	uint32_t hdr_size;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	i = 0;
	nb_enqueued = 0;
	seg_num = 0;
	extra_idx = 0;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	while (i < nb_used) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		if (nb_rx == nb_pkts)
			break;

		num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
		if (num != 1)
			continue;

		i++;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);

		rxm = rcv_pkts[0];

		if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
		rxm->data_len = (uint16_t)(len[0] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		seg_res = seg_num - 1;

		while (seg_res != 0) {
			/*
			 * Get extra segments for the current uncompleted
			 * packet.
			 */
			uint16_t rcv_cnt =
				RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
			if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
				uint32_t rx_num =
					virtqueue_dequeue_burst_rx(vq,
					rcv_pkts, len, rcv_cnt);
				i += rx_num;
				rcv_cnt = rx_num;
			} else {
				PMD_RX_LOG(ERR,
					   "Not enough segments for packet.");
				nb_enqueued++;
				virtio_discard_rxbuf(vq, rxm);
				rxvq->stats.errors++;
				break;
			}

			extra_idx = 0;

			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];

				rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);

				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
				extra_idx++;
			}
			seg_res -= rcv_cnt;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
			rx_pkts[nb_rx]->data_len);

		rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
		virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
		nb_rx++;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}
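
/*
 * Standard TX burst.  Completed descriptors are reclaimed lazily: only when
 * the number of used entries crosses the free threshold, or when a packet
 * does not fit in the remaining free slots.
 */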
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0;
	int error;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();
	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* Do VLAN tag insertion */
		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&txm);
			if (unlikely(error)) {
				rte_pktmbuf_free(txm);
				continue;
			}
		}

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;

		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * indirect   => 1
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value indicates we need to free vring descriptors */
		if (unlikely(need > 0)) {
			nb_used = VIRTQUEUE_NUSED(vq);
			virtio_rmb();
			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
			can_push, 0);

		txvq->stats.bytes += txm->pkt_len;
		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
	struct rte_mbuf *inorder_pkts[nb_pkts];
	int error;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	VIRTQUEUE_DUMP(vq);
	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();
	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	if (unlikely(!vq->vq_free_cnt))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);

	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int slots, need;

		/* Do VLAN tag insertion */
		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&txm);
			if (unlikely(error)) {
				rte_pktmbuf_free(txm);
				continue;
			}
		}

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		     rte_mbuf_refcnt_read(txm) == 1 &&
		     RTE_MBUF_DIRECT(txm) &&
		     txm->nb_segs == 1 &&
		     rte_pktmbuf_headroom(txm) >= hdr_size &&
		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
			inorder_pkts[nb_inorder_pkts] = txm;
			nb_inorder_pkts++;

			txvq->stats.bytes += txm->pkt_len;
			virtio_update_packet_stats(&txvq->stats, txm);
			continue;
		}

		/* Transmit the batched in-order packets first */
		if (nb_inorder_pkts) {
			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
							nb_inorder_pkts);
			nb_inorder_pkts = 0;
		}

		slots = txm->nb_segs + 1;
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			nb_used = VIRTQUEUE_NUSED(vq);
			virtio_rmb();
			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup_inorder(vq, need);

			need = slots - vq->vq_free_cnt;

			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				break;
			}
		}
		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

		txvq->stats.bytes += txm->pkt_len;
		virtio_update_packet_stats(&txvq->stats, txm);
	}

	/* Transmit all remaining in-order packets */
	if (nb_inorder_pkts)
		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
						nb_inorder_pkts);

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	VIRTQUEUE_DUMP(vq);

	return nb_tx;
}