/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq = rxvq->vq;

	return virtqueue_nused(vq) >= offset;

vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);

vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_split.ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_split.ring.desc[dp->next];

	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
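/*
 * Editor's note (illustrative): when a used chain is returned, its last
 * descriptor becomes the new tail of the free list. E.g. freeing chain
 * 5->6->7 onto an existing free list whose tail is 3 links desc[3].next = 5
 * and leaves vq_desc_tail_idx = 7, so the split ring keeps one linked
 * free list of descriptors.
 */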
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
	uint32_t s = mbuf->pkt_len;
	struct rte_ether_addr *ea;

		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
			stats->size_bins[0]++;
			stats->size_bins[6]++;
			stats->size_bins[7]++;
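	/*
	 * Editor's note (illustrative): bins 2-5 cover 65-127, 128-255,
	 * 256-511 and 512-1023 bytes. For example s = 200 has
	 * __builtin_clz(200) = 24, so bin = 32 - 24 - 5 = 3, i.e. the
	 * 128-255 byte bucket.
	 */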
	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))

virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
	VIRTIO_DUMP_PACKET(m, m->data_len);

	virtio_update_packet_stats(&rxvq->stats, m);

virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
	struct rte_mbuf *cookie;
	struct vring_packed_desc *desc;

	desc = vq->vq_packed.ring.desc;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx;
		/* desc_is_used has a load-acquire or rte_cio_rmb inside
		 * and wait for used desc in virtqueue.
		if (!desc_is_used(&desc[used_idx], vq))
		len[i] = desc[used_idx].len;
		id = desc[used_idx].id;
		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));

		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
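	/*
	 * Editor's note: the packed ring has no separate used ring; the
	 * consumer re-reads the same descriptor slots and relies on the
	 * AVAIL/USED flag pair matching used_wrap_counter. Toggling the
	 * counter each time vq_used_cons_idx wraps is what lets
	 * desc_is_used() tell a freshly used descriptor from one left over
	 * from the previous lap around the ring.
	 */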
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
		uint32_t *len, uint16_t num)
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;

		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));

		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;

virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;

	if (unlikely(num == 0))

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_split.ring.used->ring[used_idx];

		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));

		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;

	vq_ring_free_inorder(vq, used_idx, i);

virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
	if (unlikely(vq->vq_free_cnt < num))

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_split.ring.desc;

		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
			VIRTIO_MBUF_ADDR(cookies[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
			cookies[i]->buf_len -
			RTE_PKTMBUF_HEADROOM +
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
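/*
 * Editor's note: the buffer address handed to the device starts
 * hw->vtnet_hdr_size bytes before the end of the mbuf headroom, so the
 * device writes the virtio-net header immediately in front of the packet
 * data and the receive path can later expose the payload with a plain
 * data_off = RTE_PKTMBUF_HEADROOM.
 */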
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp = vq->vq_split.ring.desc;

	if (unlikely(vq->vq_free_cnt == 0))
	if (unlikely(vq->vq_free_cnt < num))
	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))

	for (i = 0; i < num; i++) {
		idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
			VIRTIO_MBUF_ADDR(cookie[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
		start_dp[idx].flags = VRING_DESC_F_WRITE;
		vq->vq_desc_head_idx = start_dp[idx].next;
		vq_update_avail_ring(vq, idx);
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
			struct rte_mbuf **cookie, uint16_t num)
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;

	if (unlikely(vq->vq_free_cnt == 0))
	if (unlikely(vq->vq_free_cnt < num))

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];

		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
				+ hw->vtnet_hdr_size;

		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;

		virtqueue_store_flags_packed(&start_dp[idx], flags,

		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
			flags = vq->vq_packed.cached_flags;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
virtio_tso_fix_cksum(struct rte_mbuf *m)
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
		struct rte_ipv4_hdr *iph;
		struct rte_ipv6_hdr *ip6h;
		struct rte_tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;

		iph = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
			ip6h = (struct rte_ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;

		/* calculate the new phdr checksum not including ip_paylen */
		prev_cksum = th->cksum;
		tmp = (tmp & 0xffff) + (tmp >> 16);

		/* replace it in the packet */
		th->cksum = new_cksum;
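/*
 * Editor's note (on the elided arithmetic): the adjustment works in ones'
 * complement. Conceptually new_cksum = prev_cksum + ip_paylen, with the
 * fold tmp = (tmp & 0xffff) + (tmp >> 16) handling the carry back into
 * 16 bits; this adds the IP payload length that the mbuf pseudo-header
 * checksum convention leaves out but that the virtio host-side TSO expects.
 */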
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	int16_t head_size = vq->hw->vtnet_hdr_size;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_split.ring.desc;

		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
		dxp->cookie = (void *)cookies[i];

		virtio_update_packet_stats(&txvq->stats, cookies[i]);

		hdr = rte_pktmbuf_mtod_offset(cookies[i],
				struct virtio_net_hdr *, -head_size);

		/* if offload disabled, hdr is not zeroed yet, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);

		virtqueue_xmit_offload(hdr, cookies[i], true);
			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
		start_dp[idx].len = cookies[i]->data_len + head_size;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);

virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
			struct rte_mbuf *cookie,
	struct virtqueue *vq = txvq->vq;
	struct vring_packed_desc *dp;
	struct vq_desc_extra *dxp;
	uint16_t idx, id, flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
	idx = vq->vq_avail_idx;
	dp = &vq->vq_packed.ring.desc[idx];

	dxp = &vq->vq_descx[id];
	dxp->cookie = cookie;

	flags = vq->vq_packed.cached_flags;

	/* prepend cannot fail, checked by caller */
	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,

	/* if offload disabled, hdr is not zeroed yet, do it now */
	if (!vq->hw->has_tx_offload)
		virtqueue_clear_net_hdr(hdr);

	virtqueue_xmit_offload(hdr, cookie, true);

	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
	dp->len = cookie->data_len + head_size;

	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;

	vq->vq_desc_head_idx = dxp->next;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;

	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);

virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	bool prepend_header = false;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
		dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_split.ring.desc;

		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

		idx = vq->vq_split.ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
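/*
 * Editor's note: virtqueue_enqueue_xmit() picks one of three descriptor
 * layouts: can_push prepends the virtio-net header into the mbuf headroom
 * (one descriptor per segment), use_indirect places the header plus all
 * segments into the per-slot indirect table (one main-ring slot total),
 * and the default path spends an extra main-ring descriptor on the header
 * kept in the reserved virtio_net_hdr memzone.
 */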
virtio_dev_cq_start(struct rte_eth_dev *dev)
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq && hw->cvq->vq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);

virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq;
	uint16_t rx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");

	rx_free_thresh = rx_conf->rx_free_thresh;
	if (rx_free_thresh == 0)
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);

	if (rx_free_thresh & 0x3) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			rx_free_thresh, dev->data->port_id, queue_idx);

	if (rx_free_thresh >= vq->vq_nentries) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
			"number of RX entries (%u)."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			rx_free_thresh, dev->data->port_id, queue_idx);
	vq->vq_free_thresh = rx_free_thresh;

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq->queue_id = queue_idx;

	dev->data->rx_queues[queue_idx] = rxvq;
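/*
 * Editor's note (illustrative, not part of the driver): applications reach
 * this through the generic ethdev API, e.g.
 * rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mp);
 * when the resulting rx_free_thresh is 0, the code above falls back to
 * vq_nentries / 4 capped by DEFAULT_RX_FREE_THRESH.
 */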
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each rx descriptor */
	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_split.ring.desc[desc_idx].flags =

		virtio_rxq_vec_setup(rxvq);

		memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
			vq->sw_ring[vq->vq_nentries + desc_idx] =

	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	} else if (!vtpci_packed_queue(vq->hw) && in_order) {
		if ((!virtqueue_full(vq))) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				error = virtqueue_enqueue_refill_inorder(vq,
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);

			vq_update_avail_idx(vq);
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);

			/* Enqueue allocated buffers */
			if (vtpci_packed_queue(vq->hw))
				error = virtqueue_enqueue_recv_refill_packed(vq,
				error = virtqueue_enqueue_recv_refill(vq,

		if (!vtpci_packed_queue(vq->hw))
			vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			tx_free_thresh, dev->data->port_id, queue_idx);

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;

virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];

	PMD_INIT_FUNC_TRACE();

	if (!vtpci_packed_queue(hw)) {
		if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;

virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	if (vtpci_packed_queue(vq->hw))
		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
		error = virtqueue_enqueue_recv_refill(vq, &m, 1);

	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");

virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");

/* Optionally fill offload information in structure */
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			uint16_t csum = 0, off;

			rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
			if (likely(csum != 0xffff))
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {

		/* Update mss lengths in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->ol_flags |= PKT_RX_LRO | \
				PKT_RX_L4_CKSUM_NONE;

#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
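/*
 * Editor's note (illustrative): with a 64-byte cache line and a 16-byte
 * vring_desc this is 4; the receive paths below trim each burst so that
 * vq_used_cons_idx ends on a multiple of it, letting the next burst start
 * at a cache-line-aligned descriptor.
 */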
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t i, nb_enqueued;
	struct virtio_net_hdr *hdr;

	if (unlikely(hw->started == 0))

	nb_used = virtqueue_nused(vq);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			nb_enqueued += free_cnt;
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t i, nb_enqueued;
	struct virtio_net_hdr *hdr;

	if (unlikely(hw->started == 0))

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			nb_enqueued += free_cnt;
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
,
1162 struct rte_mbuf
**rx_pkts
,
1165 struct virtnet_rx
*rxvq
= rx_queue
;
1166 struct virtqueue
*vq
= rxvq
->vq
;
1167 struct virtio_hw
*hw
= vq
->hw
;
1168 struct rte_mbuf
*rxm
;
1169 struct rte_mbuf
*prev
= NULL
;
1170 uint16_t nb_used
, num
, nb_rx
;
1171 uint32_t len
[VIRTIO_MBUF_BURST_SZ
];
1172 struct rte_mbuf
*rcv_pkts
[VIRTIO_MBUF_BURST_SZ
];
1174 uint32_t nb_enqueued
;
1181 if (unlikely(hw
->started
== 0))
1184 nb_used
= virtqueue_nused(vq
);
1185 nb_used
= RTE_MIN(nb_used
, nb_pkts
);
1186 nb_used
= RTE_MIN(nb_used
, VIRTIO_MBUF_BURST_SZ
);
1188 PMD_RX_LOG(DEBUG
, "used:%d", nb_used
);
1193 hdr_size
= hw
->vtnet_hdr_size
;
1195 num
= virtqueue_dequeue_rx_inorder(vq
, rcv_pkts
, len
, nb_used
);
1197 for (i
= 0; i
< num
; i
++) {
1198 struct virtio_net_hdr_mrg_rxbuf
*header
;
1200 PMD_RX_LOG(DEBUG
, "dequeue:%d", num
);
1201 PMD_RX_LOG(DEBUG
, "packet len:%d", len
[i
]);
1205 if (unlikely(len
[i
] < hdr_size
+ RTE_ETHER_HDR_LEN
)) {
1206 PMD_RX_LOG(ERR
, "Packet drop");
1208 virtio_discard_rxbuf_inorder(vq
, rxm
);
1209 rxvq
->stats
.errors
++;
1213 header
= (struct virtio_net_hdr_mrg_rxbuf
*)
1214 ((char *)rxm
->buf_addr
+ RTE_PKTMBUF_HEADROOM
1217 if (vtpci_with_feature(hw
, VIRTIO_NET_F_MRG_RXBUF
)) {
1218 seg_num
= header
->num_buffers
;
1225 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
;
1226 rxm
->nb_segs
= seg_num
;
1229 rxm
->pkt_len
= (uint32_t)(len
[i
] - hdr_size
);
1230 rxm
->data_len
= (uint16_t)(len
[i
] - hdr_size
);
1232 rxm
->port
= rxvq
->port_id
;
1234 rx_pkts
[nb_rx
] = rxm
;
1237 if (vq
->hw
->has_rx_offload
&&
1238 virtio_rx_offload(rxm
, &header
->hdr
) < 0) {
1239 virtio_discard_rxbuf_inorder(vq
, rxm
);
1240 rxvq
->stats
.errors
++;
1245 rte_vlan_strip(rx_pkts
[nb_rx
]);
1247 seg_res
= seg_num
- 1;
1249 /* Merge remaining segments */
1250 while (seg_res
!= 0 && i
< (num
- 1)) {
1254 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
- hdr_size
;
1255 rxm
->pkt_len
= (uint32_t)(len
[i
]);
1256 rxm
->data_len
= (uint16_t)(len
[i
]);
1258 rx_pkts
[nb_rx
]->pkt_len
+= (uint32_t)(len
[i
]);
1266 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1271 /* Last packet still need merge segments */
1272 while (seg_res
!= 0) {
1273 uint16_t rcv_cnt
= RTE_MIN((uint16_t)seg_res
,
1274 VIRTIO_MBUF_BURST_SZ
);
1276 if (likely(virtqueue_nused(vq
) >= rcv_cnt
)) {
1277 num
= virtqueue_dequeue_rx_inorder(vq
, rcv_pkts
, len
,
1279 uint16_t extra_idx
= 0;
1282 while (extra_idx
< rcv_cnt
) {
1283 rxm
= rcv_pkts
[extra_idx
];
1285 RTE_PKTMBUF_HEADROOM
- hdr_size
;
1286 rxm
->pkt_len
= (uint32_t)(len
[extra_idx
]);
1287 rxm
->data_len
= (uint16_t)(len
[extra_idx
]);
1290 rx_pkts
[nb_rx
]->pkt_len
+= len
[extra_idx
];
1296 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1301 "No enough segments for packet.");
1302 rte_pktmbuf_free(rx_pkts
[nb_rx
]);
1303 rxvq
->stats
.errors
++;
1308 rxvq
->stats
.packets
+= nb_rx
;
1310 /* Allocate new mbuf for the used descriptor */
1312 if (likely(!virtqueue_full(vq
))) {
1313 /* free_cnt may include mrg descs */
1314 uint16_t free_cnt
= vq
->vq_free_cnt
;
1315 struct rte_mbuf
*new_pkts
[free_cnt
];
1317 if (!rte_pktmbuf_alloc_bulk(rxvq
->mpool
, new_pkts
, free_cnt
)) {
1318 error
= virtqueue_enqueue_refill_inorder(vq
, new_pkts
,
1320 if (unlikely(error
)) {
1321 for (i
= 0; i
< free_cnt
; i
++)
1322 rte_pktmbuf_free(new_pkts
[i
]);
1324 nb_enqueued
+= free_cnt
;
1326 struct rte_eth_dev
*dev
=
1327 &rte_eth_devices
[rxvq
->port_id
];
1328 dev
->data
->rx_mbuf_alloc_failed
+= free_cnt
;
1332 if (likely(nb_enqueued
)) {
1333 vq_update_avail_idx(vq
);
1335 if (unlikely(virtqueue_kick_prepare(vq
))) {
1336 virtqueue_notify(vq
);
1337 PMD_RX_LOG(DEBUG
, "Notified");
1345 virtio_recv_mergeable_pkts(void *rx_queue
,
1346 struct rte_mbuf
**rx_pkts
,
1349 struct virtnet_rx
*rxvq
= rx_queue
;
1350 struct virtqueue
*vq
= rxvq
->vq
;
1351 struct virtio_hw
*hw
= vq
->hw
;
1352 struct rte_mbuf
*rxm
;
1353 struct rte_mbuf
*prev
= NULL
;
1354 uint16_t nb_used
, num
, nb_rx
= 0;
1355 uint32_t len
[VIRTIO_MBUF_BURST_SZ
];
1356 struct rte_mbuf
*rcv_pkts
[VIRTIO_MBUF_BURST_SZ
];
1358 uint32_t nb_enqueued
= 0;
1359 uint32_t seg_num
= 0;
1360 uint32_t seg_res
= 0;
1361 uint32_t hdr_size
= hw
->vtnet_hdr_size
;
1364 if (unlikely(hw
->started
== 0))
1367 nb_used
= virtqueue_nused(vq
);
1369 PMD_RX_LOG(DEBUG
, "used:%d", nb_used
);
1371 num
= likely(nb_used
<= nb_pkts
) ? nb_used
: nb_pkts
;
1372 if (unlikely(num
> VIRTIO_MBUF_BURST_SZ
))
1373 num
= VIRTIO_MBUF_BURST_SZ
;
1374 if (likely(num
> DESC_PER_CACHELINE
))
1375 num
= num
- ((vq
->vq_used_cons_idx
+ num
) %
1376 DESC_PER_CACHELINE
);
1379 num
= virtqueue_dequeue_burst_rx(vq
, rcv_pkts
, len
, num
);
1381 for (i
= 0; i
< num
; i
++) {
1382 struct virtio_net_hdr_mrg_rxbuf
*header
;
1384 PMD_RX_LOG(DEBUG
, "dequeue:%d", num
);
1385 PMD_RX_LOG(DEBUG
, "packet len:%d", len
[i
]);
1389 if (unlikely(len
[i
] < hdr_size
+ RTE_ETHER_HDR_LEN
)) {
1390 PMD_RX_LOG(ERR
, "Packet drop");
1392 virtio_discard_rxbuf(vq
, rxm
);
1393 rxvq
->stats
.errors
++;
1397 header
= (struct virtio_net_hdr_mrg_rxbuf
*)
1398 ((char *)rxm
->buf_addr
+ RTE_PKTMBUF_HEADROOM
1400 seg_num
= header
->num_buffers
;
1404 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
;
1405 rxm
->nb_segs
= seg_num
;
1408 rxm
->pkt_len
= (uint32_t)(len
[i
] - hdr_size
);
1409 rxm
->data_len
= (uint16_t)(len
[i
] - hdr_size
);
1411 rxm
->port
= rxvq
->port_id
;
1413 rx_pkts
[nb_rx
] = rxm
;
1416 if (hw
->has_rx_offload
&&
1417 virtio_rx_offload(rxm
, &header
->hdr
) < 0) {
1418 virtio_discard_rxbuf(vq
, rxm
);
1419 rxvq
->stats
.errors
++;
1424 rte_vlan_strip(rx_pkts
[nb_rx
]);
1426 seg_res
= seg_num
- 1;
1428 /* Merge remaining segments */
1429 while (seg_res
!= 0 && i
< (num
- 1)) {
1433 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
- hdr_size
;
1434 rxm
->pkt_len
= (uint32_t)(len
[i
]);
1435 rxm
->data_len
= (uint16_t)(len
[i
]);
1437 rx_pkts
[nb_rx
]->pkt_len
+= (uint32_t)(len
[i
]);
1445 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1450 /* Last packet still need merge segments */
1451 while (seg_res
!= 0) {
1452 uint16_t rcv_cnt
= RTE_MIN((uint16_t)seg_res
,
1453 VIRTIO_MBUF_BURST_SZ
);
1455 if (likely(virtqueue_nused(vq
) >= rcv_cnt
)) {
1456 num
= virtqueue_dequeue_burst_rx(vq
, rcv_pkts
, len
,
1458 uint16_t extra_idx
= 0;
1461 while (extra_idx
< rcv_cnt
) {
1462 rxm
= rcv_pkts
[extra_idx
];
1464 RTE_PKTMBUF_HEADROOM
- hdr_size
;
1465 rxm
->pkt_len
= (uint32_t)(len
[extra_idx
]);
1466 rxm
->data_len
= (uint16_t)(len
[extra_idx
]);
1469 rx_pkts
[nb_rx
]->pkt_len
+= len
[extra_idx
];
1475 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1480 "No enough segments for packet.");
1481 rte_pktmbuf_free(rx_pkts
[nb_rx
]);
1482 rxvq
->stats
.errors
++;
1487 rxvq
->stats
.packets
+= nb_rx
;
1489 /* Allocate new mbuf for the used descriptor */
1490 if (likely(!virtqueue_full(vq
))) {
1491 /* free_cnt may include mrg descs */
1492 uint16_t free_cnt
= vq
->vq_free_cnt
;
1493 struct rte_mbuf
*new_pkts
[free_cnt
];
1495 if (!rte_pktmbuf_alloc_bulk(rxvq
->mpool
, new_pkts
, free_cnt
)) {
1496 error
= virtqueue_enqueue_recv_refill(vq
, new_pkts
,
1498 if (unlikely(error
)) {
1499 for (i
= 0; i
< free_cnt
; i
++)
1500 rte_pktmbuf_free(new_pkts
[i
]);
1502 nb_enqueued
+= free_cnt
;
1504 struct rte_eth_dev
*dev
=
1505 &rte_eth_devices
[rxvq
->port_id
];
1506 dev
->data
->rx_mbuf_alloc_failed
+= free_cnt
;
1510 if (likely(nb_enqueued
)) {
1511 vq_update_avail_idx(vq
);
1513 if (unlikely(virtqueue_kick_prepare(vq
))) {
1514 virtqueue_notify(vq
);
1515 PMD_RX_LOG(DEBUG
, "Notified");
1523 virtio_recv_mergeable_pkts_packed(void *rx_queue
,
1524 struct rte_mbuf
**rx_pkts
,
1527 struct virtnet_rx
*rxvq
= rx_queue
;
1528 struct virtqueue
*vq
= rxvq
->vq
;
1529 struct virtio_hw
*hw
= vq
->hw
;
1530 struct rte_mbuf
*rxm
;
1531 struct rte_mbuf
*prev
= NULL
;
1532 uint16_t num
, nb_rx
= 0;
1533 uint32_t len
[VIRTIO_MBUF_BURST_SZ
];
1534 struct rte_mbuf
*rcv_pkts
[VIRTIO_MBUF_BURST_SZ
];
1535 uint32_t nb_enqueued
= 0;
1536 uint32_t seg_num
= 0;
1537 uint32_t seg_res
= 0;
1538 uint32_t hdr_size
= hw
->vtnet_hdr_size
;
1542 if (unlikely(hw
->started
== 0))
1547 if (unlikely(num
> VIRTIO_MBUF_BURST_SZ
))
1548 num
= VIRTIO_MBUF_BURST_SZ
;
1549 if (likely(num
> DESC_PER_CACHELINE
))
1550 num
= num
- ((vq
->vq_used_cons_idx
+ num
) % DESC_PER_CACHELINE
);
1552 num
= virtqueue_dequeue_burst_rx_packed(vq
, rcv_pkts
, len
, num
);
1554 for (i
= 0; i
< num
; i
++) {
1555 struct virtio_net_hdr_mrg_rxbuf
*header
;
1557 PMD_RX_LOG(DEBUG
, "dequeue:%d", num
);
1558 PMD_RX_LOG(DEBUG
, "packet len:%d", len
[i
]);
1562 if (unlikely(len
[i
] < hdr_size
+ RTE_ETHER_HDR_LEN
)) {
1563 PMD_RX_LOG(ERR
, "Packet drop");
1565 virtio_discard_rxbuf(vq
, rxm
);
1566 rxvq
->stats
.errors
++;
1570 header
= (struct virtio_net_hdr_mrg_rxbuf
*)((char *)
1571 rxm
->buf_addr
+ RTE_PKTMBUF_HEADROOM
- hdr_size
);
1572 seg_num
= header
->num_buffers
;
1577 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
;
1578 rxm
->nb_segs
= seg_num
;
1581 rxm
->pkt_len
= (uint32_t)(len
[i
] - hdr_size
);
1582 rxm
->data_len
= (uint16_t)(len
[i
] - hdr_size
);
1584 rxm
->port
= rxvq
->port_id
;
1585 rx_pkts
[nb_rx
] = rxm
;
1588 if (hw
->has_rx_offload
&&
1589 virtio_rx_offload(rxm
, &header
->hdr
) < 0) {
1590 virtio_discard_rxbuf(vq
, rxm
);
1591 rxvq
->stats
.errors
++;
1596 rte_vlan_strip(rx_pkts
[nb_rx
]);
1598 seg_res
= seg_num
- 1;
1600 /* Merge remaining segments */
1601 while (seg_res
!= 0 && i
< (num
- 1)) {
1605 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
- hdr_size
;
1606 rxm
->pkt_len
= (uint32_t)(len
[i
]);
1607 rxm
->data_len
= (uint16_t)(len
[i
]);
1609 rx_pkts
[nb_rx
]->pkt_len
+= (uint32_t)(len
[i
]);
1617 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1622 /* Last packet still need merge segments */
1623 while (seg_res
!= 0) {
1624 uint16_t rcv_cnt
= RTE_MIN((uint16_t)seg_res
,
1625 VIRTIO_MBUF_BURST_SZ
);
1626 uint16_t extra_idx
= 0;
1628 rcv_cnt
= virtqueue_dequeue_burst_rx_packed(vq
, rcv_pkts
,
1630 if (unlikely(rcv_cnt
== 0)) {
1631 PMD_RX_LOG(ERR
, "No enough segments for packet.");
1632 rte_pktmbuf_free(rx_pkts
[nb_rx
]);
1633 rxvq
->stats
.errors
++;
1637 while (extra_idx
< rcv_cnt
) {
1638 rxm
= rcv_pkts
[extra_idx
];
1640 rxm
->data_off
= RTE_PKTMBUF_HEADROOM
- hdr_size
;
1641 rxm
->pkt_len
= (uint32_t)(len
[extra_idx
]);
1642 rxm
->data_len
= (uint16_t)(len
[extra_idx
]);
1646 rx_pkts
[nb_rx
]->pkt_len
+= len
[extra_idx
];
1651 virtio_rx_stats_updated(rxvq
, rx_pkts
[nb_rx
]);
1656 rxvq
->stats
.packets
+= nb_rx
;
1658 /* Allocate new mbuf for the used descriptor */
1659 if (likely(!virtqueue_full(vq
))) {
1660 /* free_cnt may include mrg descs */
1661 uint16_t free_cnt
= vq
->vq_free_cnt
;
1662 struct rte_mbuf
*new_pkts
[free_cnt
];
1664 if (!rte_pktmbuf_alloc_bulk(rxvq
->mpool
, new_pkts
, free_cnt
)) {
1665 error
= virtqueue_enqueue_recv_refill_packed(vq
,
1666 new_pkts
, free_cnt
);
1667 if (unlikely(error
)) {
1668 for (i
= 0; i
< free_cnt
; i
++)
1669 rte_pktmbuf_free(new_pkts
[i
]);
1671 nb_enqueued
+= free_cnt
;
1673 struct rte_eth_dev
*dev
=
1674 &rte_eth_devices
[rxvq
->port_id
];
1675 dev
->data
->rx_mbuf_alloc_failed
+= free_cnt
;
1679 if (likely(nb_enqueued
)) {
1680 if (unlikely(virtqueue_kick_prepare_packed(vq
))) {
1681 virtqueue_notify(vq
);
1682 PMD_RX_LOG(DEBUG
, "Notified");
virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		error = rte_validate_tx_offload(m);
		if (unlikely(error)) {

		/* Do VLAN tag insertion */
		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&m);
			/* rte_vlan_insert() may change pointer
			 * even in the case of failure
			if (unlikely(error)) {

		error = rte_net_intel_cksum_prepare(m);
		if (unlikely(error)) {

		if (m->ol_flags & PKT_TX_TCP_SEG)
			virtio_tso_fix_cksum(m);
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
	if (unlikely(nb_pkts < 1))

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	if (nb_pkts > vq->vq_free_cnt)
		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, slots, need;

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
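		/*
		 * Editor's note: all of the conditions above must hold before
		 * the virtio-net header can be pushed into the mbuf itself:
		 * the device accepts any descriptor layout (ANY_LAYOUT or
		 * VERSION_1), the mbuf is a single, directly attached,
		 * un-shared segment, and its headroom fits an aligned header;
		 * otherwise the header stays in the separate reserved region.
		 */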
		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * default => number of segments + 1
		slots = txm->nb_segs + !can_push;
		need = slots - vq->vq_free_cnt;

		/* Positive value indicates it needs free vring descriptors */
		if (unlikely(need > 0)) {
			virtio_xmit_cleanup_packed(vq, need, in_order);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
					"No free tx descriptors to transmit");

		/* Enqueue Packet buffers */
			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
			virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,

		virtio_update_packet_stats(&txvq->stats, txm);

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");

virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
	if (unlikely(nb_pkts < 1))

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)

		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * default => number of segments + 1
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* Positive value indicates it needs free vring descriptors */
		if (unlikely(need > 0)) {
			nb_used = virtqueue_nused(vq);

			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
					"No free tx descriptors to transmit");

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,

		virtio_update_packet_stats(&txvq->stats, txm);

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");

static __rte_always_inline int
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
	uint16_t nb_used, nb_clean, nb_descs;

	nb_descs = vq->vq_free_cnt + need;
	nb_used = virtqueue_nused(vq);
	nb_clean = RTE_MIN(need, (int)nb_used);

	virtio_xmit_cleanup_inorder(vq, nb_clean);

	return nb_descs - vq->vq_free_cnt;
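/*
 * Editor's note: the return value is the number of descriptors still
 * missing after the cleanup pass; callers treat a value greater than zero
 * as "still not enough room" and stop the burst early.
 */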
virtio_xmit_pkts_inorder(void *tx_queue,
			struct rte_mbuf **tx_pkts,
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
	struct rte_mbuf *inorder_pkts[nb_pkts];

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
	if (unlikely(nb_pkts < 1))

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
			inorder_pkts[nb_inorder_pkts] = txm;

		if (nb_inorder_pkts) {
			need = nb_inorder_pkts - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				need = virtio_xmit_try_cleanup_inorder(vq,
				if (unlikely(need > 0)) {
						"No free tx descriptors to "

			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
			nb_inorder_pkts = 0;

		slots = txm->nb_segs + 1;
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq, slots);

			if (unlikely(need > 0)) {
					"No free tx descriptors to transmit");

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

		virtio_update_packet_stats(&txvq->stats, txm);

	/* Transmit all inorder packets */
	if (nb_inorder_pkts) {
		need = nb_inorder_pkts - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq,
			if (unlikely(need > 0)) {
					"No free tx descriptors to transmit");
				nb_inorder_pkts = vq->vq_free_cnt;

		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");

#ifndef CC_AVX512_SUPPORT
virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
			struct rte_mbuf **rx_pkts __rte_unused,
			uint16_t nb_pkts __rte_unused)

virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
			struct rte_mbuf **tx_pkts __rte_unused,
			uint16_t nb_pkts __rte_unused)

#endif /* ifndef CC_AVX512_SUPPORT */