/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_PKT_BURST 32

#define MAX_BATCH_LEN 256
static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
static __rte_always_inline bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
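
/*
 * Note on queue numbering: vhost virtqueues alternate RX/TX, so even indexes
 * (0, 2, ...) are RX rings the host enqueues into (is_tx == 0) and odd
 * indexes (1, 3, ...) are TX rings the host dequeues from (is_tx == 1).
 * For example, is_valid_virt_queue_idx(2, 0, 4) holds while
 * is_valid_virt_queue_idx(3, 0, 4) does not.
 */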
static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_split[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_cache_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}
static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size] */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

		/* update the left half used ring interval [0, left_size] */
		do_flush_shadow_used_ring_split(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	rte_smp_wmb();

	vhost_log_cache_sync(dev, vq);

	*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
	vq->shadow_used_idx = 0;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}
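
/*
 * Note on the shadow used ring: completed descriptors are first accumulated
 * in vq->shadow_used_split and flushed to the guest-visible used ring in at
 * most two rte_memcpy() calls per burst, so the shared used->idx cache line
 * is written once per burst instead of once per packet.
 */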
static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_split[i].id  = desc_idx;
	vq->shadow_used_split[i].len = len;
}
static __rte_always_inline void
flush_shadow_used_ring_packed(struct virtio_net *dev,
			struct vhost_virtqueue *vq)
{
	int i;
	uint16_t used_idx = vq->last_used_idx;
	uint16_t head_idx = vq->last_used_idx;
	uint16_t head_flags = 0;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < vq->shadow_used_idx; i++) {
		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

		used_idx += vq->shadow_used_packed[i].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;
	}

	rte_smp_wmb();

	for (i = 0; i < vq->shadow_used_idx; i++) {
		uint16_t flags;

		if (vq->shadow_used_packed[i].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;

			vhost_log_cache_used_vring(dev, vq,
					vq->last_used_idx *
					sizeof(struct vring_packed_desc),
					sizeof(struct vring_packed_desc));
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq->last_used_idx += vq->shadow_used_packed[i].count;
		if (vq->last_used_idx >= vq->size) {
			vq->used_wrap_counter ^= 1;
			vq->last_used_idx -= vq->size;
		}
	}

	vq->desc_packed[head_idx].flags = head_flags;

	vhost_log_cache_used_vring(dev, vq,
				head_idx *
				sizeof(struct vring_packed_desc),
				sizeof(struct vring_packed_desc));

	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}
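
/*
 * Ordering note for the packed ring flush above: descriptor ids and lengths
 * are written first, then the flags of every descriptor except the chain
 * head, and finally the head descriptor's AVAIL/USED flags. Since the guest
 * polls the head flags, it cannot observe a partially updated batch.
 */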
static __rte_always_inline void
update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len, uint16_t count)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_packed[i].id  = desc_idx;
	vq->shadow_used_packed[i].len = len;
	vq->shadow_used_packed[i].count = count;
}
static __rte_always_inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}

	vq->batch_copy_nb_elems = 0;
}
static __rte_always_inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

	vq->batch_copy_nb_elems = 0;
}
/* avoid unnecessary write operations, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
		csum_l4 |= PKT_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case PKT_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct tcp_hdr,
						cksum));
			break;
		case PKT_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct udp_hdr,
						dgram_cksum));
			break;
		case PKT_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct sctp_hdr,
						cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* IP cksum verification cannot be bypassed, so calculate it here */
	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
		struct ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
		if (m_buf->ol_flags & PKT_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
		net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
			m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}
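
/*
 * Worked example (illustrative values): for a TCPv4 mbuf carrying
 * PKT_TX_TCP_SEG with l2_len = 14, l3_len = 20, l4_len = 20 and
 * tso_segsz = 1448, the function above produces
 * flags = VIRTIO_NET_HDR_F_NEEDS_CSUM, csum_start = 34, csum_offset = 16
 * (offset of the TCP checksum field), gso_type = VIRTIO_NET_HDR_GSO_TCPV4,
 * gso_size = 1448 and hdr_len = 54.
 */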
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t *vec_idx,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint16_t vec_id = *vec_idx;

	while (desc_len) {
		uint64_t desc_addr;
		uint64_t desc_chunck_len = desc_len;

		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);
		if (unlikely(!desc_addr))
			return -1;

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len  = desc_chunck_len;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
		vec_id++;
	}
	*vec_idx = vec_id;

	return 0;
}
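
/*
 * Note: a single guest descriptor may be split over several buf_vec entries.
 * vhost_iova_to_vva() shrinks desc_chunck_len to the largest contiguous
 * region it can translate, so the loop above keeps mapping chunks until the
 * whole descriptor is covered (or BUF_VECTOR_MAX is exceeded).
 */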
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint16_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint32_t *desc_chain_len, uint8_t perm)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint16_t vec_id = *vec_idx;
	uint32_t len    = 0;
	uint64_t dlen;
	uint32_t nr_descs = vq->size;
	uint32_t cnt    = 0;
	struct vring_desc *descs = vq->desc;
	struct vring_desc *idesc = NULL;

	if (unlikely(idx >= vq->size))
		return -1;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		dlen = vq->desc[idx].len;
		nr_descs = dlen / sizeof(struct vring_desc);
		if (unlikely(nr_descs > vq->size))
			return -1;

		descs = (struct vring_desc *)(uintptr_t)
			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
						&dlen,
						VHOST_ACCESS_RO);
		if (unlikely(!descs))
			return -1;

		if (unlikely(dlen < vq->desc[idx].len)) {
			/*
			 * The indirect desc table is not contiguous
			 * in process VA space, we have to copy it.
			 */
			idesc = alloc_copy_ind_table(dev, vq,
					vq->desc[idx].addr, vq->desc[idx].len);
			if (unlikely(!idesc))
				return -1;

			descs = idesc;
		}

		idx = 0;
	}

	while (1) {
		if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
			free_ind_table(idesc);
			return -1;
		}

		len += descs[idx].len;

		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[idx].addr, descs[idx].len,
						perm))) {
			free_ind_table(idesc);
			return -1;
		}

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	if (unlikely(!!idesc))
		free_ind_table(idesc);

	return 0;
}
/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head,
				uint16_t *nr_vec)
{
	uint16_t cur_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t head_idx = 0;
	uint32_t len = 0;

	*num_buffers = 0;
	cur_idx  = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
						&vec_idx, buf_vec,
						&head_idx, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring_split(vq, head_idx, len);
		size -= len;

		cur_idx++;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}
static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vring_packed_desc *desc, uint16_t *vec_idx,
			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
	uint16_t i;
	uint32_t nr_descs;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;
	struct vring_packed_desc *descs, *idescs = NULL;

	dlen = desc->len;
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/*
		 * The indirect desc table is not contiguous
		 * in process VA space, we have to copy it.
		 */
		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;

		descs = idescs;
	}

	nr_descs = desc->len / sizeof(struct vring_packed_desc);
	if (unlikely(nr_descs >= vq->size)) {
		free_ind_table(idescs);
		return -1;
	}

	for (i = 0; i < nr_descs; i++) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
			free_ind_table(idescs);
			return -1;
		}

		*len += descs[i].len;
		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[i].addr, descs[i].len,
						perm)))
			return -1;
	}
	*vec_idx = vec_id;

	if (unlikely(!!idescs))
		free_ind_table(idescs);

	return 0;
}
static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint16_t avail_idx, uint16_t *desc_count,
				struct buf_vector *buf_vec, uint16_t *vec_idx,
				uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t vec_id = *vec_idx;

	if (avail_idx < vq->last_avail_idx)
		wrap_counter ^= 1;

	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
		return -1;

	/*
	 * The ordering between desc flags and desc
	 * content reads needs to be enforced.
	 */
	rte_smp_rmb();

	*desc_count = 0;
	*len = 0;

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		if (unlikely(*desc_count >= vq->size))
			return -1;

		*desc_count += 1;
		*buf_id = descs[avail_idx].id;

		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
							&descs[avail_idx],
							&vec_id, buf_vec,
							len, perm) < 0))
				return -1;
		} else {
			*len += descs[avail_idx].len;

			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
							descs[avail_idx].addr,
							descs[avail_idx].len,
							perm)))
				return -1;
		}

		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			wrap_counter ^= 1;
		}
	}

	*vec_idx = vec_id;

	return 0;
}
/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *nr_vec, uint16_t *num_buffers,
				uint16_t *nr_descs)
{
	uint16_t avail_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t buf_id = 0;
	uint32_t len = 0;
	uint16_t desc_count;

	*num_buffers = 0;
	avail_idx = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &vec_idx,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
		size -= len;

		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;

		*nr_descs += desc_count;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    struct rte_mbuf *m, struct buf_vector *buf_vec,
			    uint16_t nr_vec, uint16_t num_buffers)
{
	uint32_t vec_idx = 0;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t buf_offset, buf_avail;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t cpy_len;
	uint64_t hdr_addr;
	struct rte_mbuf *hdr_mbuf;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
	int error = 0;

	if (unlikely(m == NULL)) {
		error = -1;
		goto out;
	}

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (nr_vec > 1)
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	hdr_mbuf = m;
	hdr_addr = buf_addr;
	if (unlikely(buf_len < dev->vhost_hlen))
		hdr = &tmp_hdr;
	else
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
		dev->vid, num_buffers);

	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_len - dev->vhost_hlen;
	}

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current buf, get the next one */
		if (buf_avail == 0) {
			vec_idx++;
			if (unlikely(vec_idx >= nr_vec)) {
				error = -1;
				goto out;
			}

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/* Prefetch next buffer address. */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);
			buf_offset = 0;
			buf_avail  = buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			if (rxvq_is_mergeable(dev))
				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
						num_buffers);

			if (unlikely(hdr == &tmp_hdr)) {
				uint64_t len;
				uint64_t remain = dev->vhost_hlen;
				uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
				uint64_t iova = buf_vec[0].buf_iova;
				uint16_t hdr_vec_idx = 0;

				while (remain) {
					len = RTE_MIN(remain,
						buf_vec[hdr_vec_idx].buf_len);
					dst = buf_vec[hdr_vec_idx].buf_addr;
					rte_memcpy((void *)(uintptr_t)dst,
							(void *)(uintptr_t)src,
							len);

					PRINT_PACKET(dev, (uintptr_t)dst,
							(uint32_t)len, 0);
					vhost_log_cache_write(dev, vq,
							iova, len);

					remain -= len;
					iova += len;
					src += len;
					hdr_vec_idx++;
				}
			} else {
				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
						dev->vhost_hlen, 0);
				vhost_log_cache_write(dev, vq,
						buf_vec[0].buf_iova,
						dev->vhost_hlen);
			}

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN ||
					vq->batch_copy_nb_elems >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
					cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
				cpy_len, 0);
		} else {
			batch_copy[vq->batch_copy_nb_elems].dst =
				(void *)((uintptr_t)(buf_addr + buf_offset));
			batch_copy[vq->batch_copy_nb_elems].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[vq->batch_copy_nb_elems].log_addr =
				buf_iova + buf_offset;
			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
			vq->batch_copy_nb_elems++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail  -= cpy_len;
		buf_offset += cpy_len;
	}

out:

	return error;
}
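
/*
 * Copy batching note: copies larger than MAX_BATCH_LEN bytes are performed
 * immediately with rte_memcpy(), while smaller ones are queued in
 * vq->batch_copy_elems and executed later by do_data_copy_enqueue(), which
 * amortizes the cost of many small per-segment copies.
 */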
static __rte_always_inline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	avail_head = *((volatile uint16_t *)&vq->avail->idx);

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	rte_smp_rmb();

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return pkt_idx;
}
static __rte_always_inline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;
		uint16_t nr_descs = 0;

		if (unlikely(reserve_avail_buf_packed(dev, vq,
						pkt_len, buf_vec, &nr_vec,
						&num_buffers, &nr_descs) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += nr_descs;
		if (vq->last_avail_idx >= vq->size) {
			vq->last_avail_idx -= vq->size;
			vq->avail_wrap_counter ^= 1;
		}
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_packed(dev, vq);
		vhost_vring_call_packed(dev, vq);
	}

	return pkt_idx;
}
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint32_t nb_tx = 0;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(vq->enabled == 0))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
	else
		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return nb_tx;
}
uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	return virtio_dev_rx(dev, queue_id, pkts, count);
}
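
/*
 * Typical caller usage (illustrative sketch; "vid", "mbufs" and "nb" are
 * assumed to be set up by the application). The API copies packet data into
 * guest buffers, so the caller still owns every mbuf afterwards:
 *
 *	uint16_t sent = rte_vhost_enqueue_burst(vid, 0, mbufs, nb);
 *
 *	for (uint16_t i = 0; i < nb; i++)
 *		rte_pktmbuf_free(mbufs[i]);
 *
 * Queue 0 is used here because enqueue requires an even (RX) virtqueue index.
 */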
static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
			((1ULL << VIRTIO_NET_F_CSUM) |
			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}
static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	void *l3_hdr = NULL;
	struct ether_hdr *eth_hdr;
	uint16_t ethertype;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	m->l2_len = sizeof(struct ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == ETHER_TYPE_VLAN) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	l3_hdr = (char *)eth_hdr + m->l2_len;

	switch (ethertype) {
	case ETHER_TYPE_IPv4:
		ipv4_hdr = l3_hdr;
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case ETHER_TYPE_IPv6:
		ipv6_hdr = l3_hdr;
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct ipv6_hdr);
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		*l4_hdr = NULL;
		break;
	}
}
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
	uint16_t l4_proto = 0;
	void *l4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	parse_ethernet(m, &l4_proto, &l4_hdr);
	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct tcp_hdr, cksum)):
				if (l4_proto == IPPROTO_TCP)
					m->ol_flags |= PKT_TX_TCP_CKSUM;
				break;
			case (offsetof(struct udp_hdr, dgram_cksum)):
				if (l4_proto == IPPROTO_UDP)
					m->ol_flags |= PKT_TX_UDP_CKSUM;
				break;
			case (offsetof(struct sctp_hdr, cksum)):
				if (l4_proto == IPPROTO_SCTP)
					m->ol_flags |= PKT_TX_SCTP_CKSUM;
				break;
			default:
				break;
			}
		}
	}

	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			tcp_hdr = l4_hdr;
			m->ol_flags |= PKT_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			m->ol_flags |= PKT_TX_UDP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = sizeof(struct udp_hdr);
			break;
		default:
			RTE_LOG(WARNING, VHOST_DATA,
				"unsupported gso type %u.\n", hdr->gso_type);
			break;
		}
	}
}
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct buf_vector *buf_vec, uint16_t nr_vec,
		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
	uint32_t buf_avail, buf_offset;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr tmp_hdr;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid desc dead loop chain */
	uint16_t vec_idx = 0;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	int error = 0;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	if (likely(nr_vec > 1))
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (virtio_net_with_host_offload(dev)) {
		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
			uint64_t len;
			uint64_t remain = sizeof(struct virtio_net_hdr);
			uint64_t src;
			uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
			uint16_t hdr_vec_idx = 0;

			/*
			 * No luck, the virtio-net header doesn't fit
			 * in a contiguous virtual area.
			 */
			while (remain) {
				len = RTE_MIN(remain,
					buf_vec[hdr_vec_idx].buf_len);
				src = buf_vec[hdr_vec_idx].buf_addr;
				rte_memcpy((void *)(uintptr_t)dst,
						(void *)(uintptr_t)src, len);

				remain -= len;
				dst += len;
				hdr_vec_idx++;
			}

			hdr = &tmp_hdr;
		} else {
			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
		}
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail  = buf_len - buf_offset;
	} else if (buf_len == dev->vhost_hlen) {
		if (unlikely(++vec_idx >= nr_vec))
			goto out;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;

		buf_offset = 0;
		buf_avail = buf_len;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	}

	rte_prefetch0((void *)(uintptr_t)
			(buf_addr + buf_offset));

	PRINT_PACKET(dev,
			(uintptr_t)(buf_addr + buf_offset),
			(uint32_t)buf_avail, 0);

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		uint64_t hpa;

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		/*
		 * A desc buf might span two host physical pages that are
		 * not contiguous. In such a case (gpa_to_hpa returns 0), data
		 * will be copied even though zero copy is enabled.
		 */
		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
					buf_iova + buf_offset, cpy_len)))) {
			cur->data_len = cpy_len;
			cur->data_off = 0;
			cur->buf_addr =
				(void *)(uintptr_t)(buf_addr + buf_offset);
			cur->buf_iova = hpa;

			/*
			 * In zero copy mode, one mbuf can only reference data
			 * for one or partial of one desc buff.
			 */
			mbuf_avail = cpy_len;
		} else {
			if (likely(cpy_len > MAX_BATCH_LEN ||
				   vq->batch_copy_nb_elems >= vq->size ||
				   (hdr && cur == m))) {
				rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
								   mbuf_offset),
					   (void *)((uintptr_t)(buf_addr +
							   buf_offset)),
					   cpy_len);
			} else {
				batch_copy[vq->batch_copy_nb_elems].dst =
					rte_pktmbuf_mtod_offset(cur, void *,
								mbuf_offset);
				batch_copy[vq->batch_copy_nb_elems].src =
					(void *)((uintptr_t)(buf_addr +
								buf_offset));
				batch_copy[vq->batch_copy_nb_elems].len =
					cpy_len;
				vq->batch_copy_nb_elems++;
			}
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;

		/* This buf has reached its end, get the next one */
		if (buf_avail == 0) {
			if (++vec_idx >= nr_vec)
				break;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/*
			 * Prefetch desc n + 1 buffer while
			 * desc n buffer is processed.
			 */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);

			buf_offset = 0;
			buf_avail  = buf_len;

			PRINT_PACKET(dev, (uintptr_t)buf_addr,
					(uint32_t)buf_avail, 0);
		}

		/*
		 * This mbuf has reached its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				error = -1;
				goto out;
			}
			if (unlikely(dev->dequeue_zero_copy))
				rte_mbuf_refcnt_update(cur, 1);

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(hdr, m);

out:

	return error;
}
static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;

	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;

again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}

	tries++;
	if (tries == 1) {
		/* search [0, last_zmbuf_idx) */
		i = 0;
		last = vq->last_zmbuf_idx;
		goto again;
	}

	return NULL;
}
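
/*
 * Dequeue zero-copy note: when dequeue_zero_copy is enabled, mbufs are made
 * to point directly at guest buffers instead of copying, and each one is
 * tracked by a zcopy_mbuf entry. The matching used-ring update is deferred
 * until the application has freed the mbuf (see the mbuf_is_consumed()
 * checks in the dequeue paths below).
 */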
static __rte_always_inline uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i;
	uint16_t free_entries;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				update_shadow_used_ring_split(vq,
						zmbuf->desc_idx, 0);
				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				restore_mbuf(zmbuf->mbuf);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
			vq->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	rte_smp_rmb();

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);
	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t head_idx;
		uint32_t dummy_len;
		uint16_t nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_split(dev, vq,
						vq->last_avail_idx + i,
						&nr_vec, buf_vec,
						&head_idx, &dummy_len,
						VHOST_ACCESS_RO) < 0))
			break;

		if (likely(dev->dequeue_zero_copy == 0))
			update_shadow_used_ring_split(vq, head_idx, 0);

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = head_idx;

			/*
			 * Pin lock the mbuf; we will check later to see
			 * whether the mbuf is freed (when we are the last
			 * user) or not. If that's the case, we then could
			 * update the used ring safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}
	}
	vq->last_avail_idx += i;

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		if (unlikely(i < count))
			vq->shadow_used_idx = i;
		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	return i;
}
static __rte_always_inline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				update_shadow_used_ring_packed(vq,
						zmbuf->desc_idx,
						0,
						zmbuf->desc_count);

				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				restore_mbuf(zmbuf->mbuf);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_packed(dev, vq);
			vhost_vring_call_packed(dev, vq);
		}
	}

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t buf_id;
		uint32_t dummy_len;
		uint16_t desc_count, nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						vq->last_avail_idx, &desc_count,
						buf_vec, &nr_vec,
						&buf_id, &dummy_len,
						VHOST_ACCESS_RO) < 0))
			break;

		if (likely(dev->dequeue_zero_copy == 0))
			update_shadow_used_ring_packed(vq, buf_id, 0,
					desc_count);

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = buf_id;
			zmbuf->desc_count = desc_count;

			/*
			 * Pin lock the mbuf; we will check later to see
			 * whether the mbuf is freed (when we are the last
			 * user) or not. If that's the case, we then could
			 * update the used ring safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}

		vq->last_avail_idx += desc_count;
		if (vq->last_avail_idx >= vq->size) {
			vq->last_avail_idx -= vq->size;
			vq->avail_wrap_counter ^= 1;
		}
	}

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		if (unlikely(i < count))
			vq->shadow_used_idx = i;
		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_packed(dev, vq);
			vhost_vring_call_packed(dev, vq);
		}
	}

	return i;
}
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev;
	struct rte_mbuf *rarp_mbuf = NULL;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
		return 0;

	if (unlikely(vq->enabled == 0)) {
		count = 0;
		goto out_access_unlock;
	}

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0)) {
			count = 0;
			goto out;
		}

	/*
	 * Construct a RARP broadcast packet, and inject it to the "pkts"
	 * array, to make it look like the guest actually sent such a packet.
	 *
	 * Check user_send_rarp() for more information.
	 *
	 * broadcast_rarp shares a cacheline in the virtio_net structure
	 * with some fields that are accessed during enqueue and
	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
	 * result in false sharing between enqueue and dequeue.
	 *
	 * Prevent unnecessary false sharing by reading broadcast_rarp first
	 * and only performing cmpset if the read indicates it is likely to
	 * be set.
	 */
	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
			rte_atomic16_cmpset((volatile uint16_t *)
				&dev->broadcast_rarp.cnt, 1, 0))) {

		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
		if (rarp_mbuf == NULL) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to make RARP packet.\n");
			count = 0;
			goto out;
		}
		count -= 1;
	}

	if (vq_is_packed(dev))
		count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
	else
		count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	if (unlikely(rarp_mbuf != NULL)) {
		/*
		 * Inject it to the head of "pkts" array, so that switch's mac
		 * learning table will get updated first.
		 */
		memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
		pkts[0] = rarp_mbuf;
		count += 1;
	}

	return count;
}
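
/*
 * Typical caller usage (illustrative sketch; "vid" and "pool" are assumed to
 * be set up by the application). Dequeue requires an odd (TX) virtqueue
 * index, and dequeued mbufs are owned by the caller, which transmits or
 * frees them:
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t n = rte_vhost_dequeue_burst(vid, 1, pool, pkts,
 *			MAX_PKT_BURST);
 *
 *	for (uint16_t i = 0; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */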