/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_PKT_BURST 32

#define MAX_BATCH_LEN 256
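/*
 * MAX_PKT_BURST caps how many packets a single enqueue/dequeue call will
 * process. Copies of up to MAX_BATCH_LEN bytes are not performed inline:
 * they are staged in vq->batch_copy_elems and executed together once the
 * burst has been set up (see do_data_copy_enqueue/do_data_copy_dequeue
 * below).
 */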
static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}
static __rte_always_inline bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
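/*
 * Virtio-net virtqueues come in RX/TX pairs: even indices are receive
 * queues from the guest's point of view (the host enqueue path), odd
 * indices are transmit queues (the host dequeue path). For example, with
 * nr_vring == 4, valid queue_id values are 0 and 2 for enqueue, 1 and 3
 * for dequeue.
 */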
static __rte_always_inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
					   elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}

	vq->batch_copy_nb_elems = 0;
}
static __rte_always_inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

	vq->batch_copy_nb_elems = 0;
}
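/*
 * Split ring: used->ring updates are first accumulated in the
 * shadow_used_split array and written back in one or two memcpy calls per
 * burst (two when the batch wraps past the end of the ring). This limits
 * the number of cachelines bounced between the host and the guest driver.
 */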
static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_split[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_cache_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}
static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size] */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

		/* update the left half used ring interval [0, left_size] */
		do_flush_shadow_used_ring_split(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	vhost_log_cache_sync(dev, vq);

	__atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
			   __ATOMIC_RELEASE);
	vq->shadow_used_idx = 0;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}
static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint32_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_split[i].id  = desc_idx;
	vq->shadow_used_split[i].len = len;
}
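/*
 * Packed ring: a used descriptor is published by writing its id/len first
 * and its flags last, with a write barrier in between, so the guest never
 * observes a descriptor whose AVAIL/USED bits are updated before its
 * payload fields. The AVAIL/USED bits are derived from
 * vq->used_wrap_counter: both are set while the counter is 1 and both are
 * cleared while it is 0. The head descriptor of a burst is flagged last
 * of all, making the whole burst visible atomically.
 */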
static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq)
{
	int i;
	uint16_t used_idx = vq->last_used_idx;
	uint16_t head_idx = vq->last_used_idx;
	uint16_t head_flags = 0;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < vq->shadow_used_idx; i++) {
		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

		used_idx += vq->shadow_used_packed[i].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;
	}

	rte_smp_wmb();

	for (i = 0; i < vq->shadow_used_idx; i++) {
		uint16_t flags;

		if (vq->shadow_used_packed[i].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;

			vhost_log_cache_used_vring(dev, vq,
					vq->last_used_idx *
					sizeof(struct vring_packed_desc),
					sizeof(struct vring_packed_desc));
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
	}

	vq->desc_packed[head_idx].flags = head_flags;

	vhost_log_cache_used_vring(dev, vq,
				head_idx *
				sizeof(struct vring_packed_desc),
				sizeof(struct vring_packed_desc));

	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}
static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq)
{
	struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

	vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
	rte_smp_wmb();
	vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;

	vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc));
	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}
static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
				 struct vhost_virtqueue *vq,
				 uint64_t *lens,
				 uint16_t *ids)
{
	uint16_t i;
	uint16_t flags;

	if (vq->shadow_used_idx) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}

	flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		vq->desc_packed[vq->last_used_idx + i].id  = ids[i];
		vq->desc_packed[vq->last_used_idx + i].len = lens[i];
	}

	rte_smp_wmb();

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		vq->desc_packed[vq->last_used_idx + i].flags = flags;

	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc) *
				   PACKED_BATCH_SIZE);
	vhost_log_cache_sync(dev, vq);

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}
static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
					  uint16_t id)
{
	vq->shadow_used_packed[0].id = id;

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].flags =
			PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].count = 1;
		vq->shadow_used_idx++;
	}

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}
static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
				  struct vhost_virtqueue *vq,
				  uint16_t *ids)
{
	uint16_t flags;
	uint16_t i;
	uint16_t begin;

	flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].id  = ids[0];
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].count = 1;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
		begin = 1;
	} else
		begin = 0;

	vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
		vq->desc_packed[vq->last_used_idx + i].id = ids[i];
		vq->desc_packed[vq->last_used_idx + i].len = 0;
	}

	rte_smp_wmb();
	vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
		vq->desc_packed[vq->last_used_idx + i].flags = flags;

	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
				   sizeof(struct vring_packed_desc),
				   sizeof(struct vring_packed_desc) *
				   PACKED_BATCH_SIZE);
	vhost_log_cache_sync(dev, vq);

	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}
static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
				   uint16_t buf_id,
				   uint16_t count)
{
	uint16_t flags;

	flags = vq->desc_packed[vq->last_used_idx].flags;
	if (vq->used_wrap_counter) {
		flags |= VRING_DESC_F_USED;
		flags |= VRING_DESC_F_AVAIL;
	} else {
		flags &= ~VRING_DESC_F_USED;
		flags &= ~VRING_DESC_F_AVAIL;
	}

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;

		vq->shadow_used_packed[0].id  = buf_id;
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
	} else {
		vq->desc_packed[vq->last_used_idx].id = buf_id;
		vq->desc_packed[vq->last_used_idx].len = 0;
		vq->desc_packed[vq->last_used_idx].flags = flags;
	}

	vq_inc_last_used_packed(vq, count);
}
static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
					   uint16_t buf_id,
					   uint16_t count)
{
	uint16_t flags;

	vq->shadow_used_packed[0].id = buf_id;

	flags = vq->desc_packed[vq->last_used_idx].flags;
	if (vq->used_wrap_counter) {
		flags |= VRING_DESC_F_USED;
		flags |= VRING_DESC_F_AVAIL;
	} else {
		flags &= ~VRING_DESC_F_USED;
		flags &= ~VRING_DESC_F_AVAIL;
	}

	if (!vq->shadow_used_idx) {
		vq->shadow_last_used_idx = vq->last_used_idx;
		vq->shadow_used_packed[0].len = 0;
		vq->shadow_used_packed[0].flags = flags;
		vq->shadow_used_idx++;
	}

	vq_inc_last_used_packed(vq, count);
}
static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
				   struct vhost_virtqueue *vq,
				   uint32_t *len,
				   uint16_t *id,
				   uint16_t *count,
				   uint16_t num_buffers)
{
	uint16_t i;

	for (i = 0; i < num_buffers; i++) {
		/* enqueue shadow flush action aligned with batch num */
		if (!vq->shadow_used_idx)
			vq->shadow_aligned_idx = vq->last_used_idx &
				PACKED_BATCH_MASK;
		vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
		vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
		vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
		vq->shadow_aligned_idx += count[i];
		vq->shadow_used_idx++;
	}

	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}
}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)
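/*
 * For example, ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0) leaves an
 * already-zero header field untouched, so a descriptor cacheline that is
 * shared with the guest is not needlessly dirtied on the common path.
 */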
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
		csum_l4 |= PKT_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case PKT_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
						cksum));
			break;
		case PKT_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
						dgram_cksum));
			break;
		case PKT_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
						cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* IP cksum verification cannot be bypassed, then calculate here */
	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
		if (m_buf->ol_flags & PKT_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
		net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
			m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}
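/*
 * Worked example: for a TSO IPv4/TCP mbuf with l2_len = 14, l3_len = 20
 * and l4_len = 20, the function above produces csum_start = 34,
 * csum_offset = offsetof(struct rte_tcp_hdr, cksum) = 16,
 * gso_type = VIRTIO_NET_HDR_GSO_TCPV4, gso_size = tso_segsz and
 * hdr_len = 54.
 */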
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t *vec_idx,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint16_t vec_id = *vec_idx;

	while (desc_len) {
		uint64_t desc_addr;
		uint64_t desc_chunck_len = desc_len;

		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);
		if (unlikely(!desc_addr))
			return -1;

		rte_prefetch0((void *)(uintptr_t)desc_addr);

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len  = desc_chunck_len;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
		vec_id++;
	}
	*vec_idx = vec_id;

	return 0;
}
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint16_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint32_t *desc_chain_len, uint8_t perm)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint16_t vec_id = *vec_idx;
	uint32_t len    = 0;
	uint64_t dlen;
	uint32_t nr_descs = vq->size;
	uint32_t cnt    = 0;
	struct vring_desc *descs = vq->desc;
	struct vring_desc *idesc = NULL;

	if (unlikely(idx >= vq->size))
		return -1;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		dlen = vq->desc[idx].len;
		nr_descs = dlen / sizeof(struct vring_desc);
		if (unlikely(nr_descs > vq->size))
			return -1;

		descs = (struct vring_desc *)(uintptr_t)
			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
						&dlen,
						VHOST_ACCESS_RO);
		if (unlikely(!descs))
			return -1;

		if (unlikely(dlen < vq->desc[idx].len)) {
			/*
			 * The indirect desc table is not contiguous
			 * in process VA space, we have to copy it.
			 */
			idesc = vhost_alloc_copy_ind_table(dev, vq,
					vq->desc[idx].addr, vq->desc[idx].len);
			if (unlikely(!idesc))
				return -1;

			descs = idesc;
		}

		idx = 0;
	}

	while (1) {
		if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
			free_ind_table(idesc);
			return -1;
		}

		len += descs[idx].len;

		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[idx].addr, descs[idx].len,
						perm))) {
			free_ind_table(idesc);
			return -1;
		}

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	if (unlikely(!!idesc))
		free_ind_table(idesc);

	return 0;
}
/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head,
				uint16_t *nr_vec)
{
	uint16_t cur_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t head_idx = 0;
	uint32_t len = 0;

	*num_buffers = 0;
	cur_idx  = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
						&vec_idx, buf_vec,
						&head_idx, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring_split(vq, head_idx, len);
		size -= len;

		cur_idx++;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}
static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vring_packed_desc *desc, uint16_t *vec_idx,
			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
	uint16_t i;
	uint32_t nr_descs;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;
	struct vring_packed_desc *descs, *idescs = NULL;

	dlen = desc->len;
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/*
		 * The indirect desc table is not contiguous
		 * in process VA space, we have to copy it.
		 */
		idescs = vhost_alloc_copy_ind_table(dev,
				vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;

		descs = idescs;
	}

	nr_descs = desc->len / sizeof(struct vring_packed_desc);
	if (unlikely(nr_descs >= vq->size)) {
		free_ind_table(idescs);
		return -1;
	}

	for (i = 0; i < nr_descs; i++) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
			free_ind_table(idescs);
			return -1;
		}

		*len += descs[i].len;
		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[i].addr, descs[i].len,
						perm)))
			return -1;
	}
	*vec_idx = vec_id;

	if (unlikely(!!idescs))
		free_ind_table(idescs);

	return 0;
}
static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint16_t avail_idx, uint16_t *desc_count,
				struct buf_vector *buf_vec, uint16_t *vec_idx,
				uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t vec_id = *vec_idx;

	if (avail_idx < vq->last_avail_idx)
		wrap_counter ^= 1;

	/*
	 * Perform a load-acquire barrier in desc_is_avail to
	 * enforce the ordering between desc flags and desc
	 * content.
	 */
	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
		return -1;

	*desc_count = 0;
	*len = 0;

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		if (unlikely(*desc_count >= vq->size))
			return -1;

		*desc_count += 1;
		*buf_id = descs[avail_idx].id;

		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
							&descs[avail_idx],
							&vec_id, buf_vec,
							len, perm) < 0))
				return -1;
		} else {
			*len += descs[avail_idx].len;

			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
							descs[avail_idx].addr,
							descs[avail_idx].len,
							perm)))
				return -1;
		}

		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			wrap_counter ^= 1;
		}
	}

	*vec_idx = vec_id;

	return 0;
}
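/*
 * The avail wrap counter flips each time the ring wraps: the initial
 * adjustment above (avail_idx < vq->last_avail_idx) accounts for a batch
 * that starts beyond the wrap point, and the toggle inside the loop keeps
 * the counter consistent while a descriptor chain crosses the ring
 * boundary.
 */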
static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec,
		struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	uint64_t len;
	uint64_t remain = dev->vhost_hlen;
	uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
	uint64_t iova = buf_vec->buf_iova;

	while (remain) {
		len = RTE_MIN(remain,
				buf_vec->buf_len);
		dst = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src,
				len);

		PRINT_PACKET(dev, (uintptr_t)dst,
				(uint32_t)len, 0);
		vhost_log_cache_write_iova(dev, vq,
				iova, len);

		remain -= len;
		iova += len;
		src += len;
		buf_vec++;
	}
}
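/*
 * copy_mbuf_to_desc() below fills the virtio-net header in place when the
 * first descriptor buffer is large enough; otherwise it builds the header
 * in tmp_hdr on the stack and scatters it across descriptors with
 * copy_vnet_hdr_to_desc() once the offload fields are final.
 */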
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
			struct rte_mbuf *m, struct buf_vector *buf_vec,
			uint16_t nr_vec, uint16_t num_buffers)
{
	uint32_t vec_idx = 0;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t buf_offset, buf_avail;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t cpy_len;
	uint64_t hdr_addr;
	struct rte_mbuf *hdr_mbuf;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
	int error = 0;

	if (unlikely(m == NULL)) {
		error = -1;
		goto out;
	}

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	hdr_mbuf = m;
	hdr_addr = buf_addr;
	if (unlikely(buf_len < dev->vhost_hlen))
		hdr = &tmp_hdr;
	else
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

	VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
		dev->vid, num_buffers);

	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_len - dev->vhost_hlen;
	}

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current buf, get the next one */
		if (buf_avail == 0) {
			vec_idx++;
			if (unlikely(vec_idx >= nr_vec)) {
				error = -1;
				goto out;
			}

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			buf_offset = 0;
			buf_avail  = buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			if (rxvq_is_mergeable(dev))
				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
						num_buffers);

			if (unlikely(hdr == &tmp_hdr)) {
				copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
			} else {
				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
						dev->vhost_hlen, 0);
				vhost_log_cache_write_iova(dev, vq,
						buf_vec[0].buf_iova,
						dev->vhost_hlen);
			}

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN ||
					vq->batch_copy_nb_elems >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_cache_write_iova(dev, vq,
						   buf_iova + buf_offset,
						   cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
				cpy_len, 0);
		} else {
			batch_copy[vq->batch_copy_nb_elems].dst =
				(void *)((uintptr_t)(buf_addr + buf_offset));
			batch_copy[vq->batch_copy_nb_elems].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[vq->batch_copy_nb_elems].log_addr =
				buf_iova + buf_offset;
			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
			vq->batch_copy_nb_elems++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail  -= cpy_len;
		buf_offset += cpy_len;
	}

out:

	return error;
}
static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mbuf *pkt,
			    struct buf_vector *buf_vec,
			    uint16_t *nr_descs)
{
	uint16_t nr_vec = 0;
	uint16_t avail_idx = vq->last_avail_idx;
	uint16_t max_tries, tries = 0;
	uint16_t buf_id = 0;
	uint32_t len = 0;
	uint16_t desc_count;
	uint32_t size = pkt->pkt_len + dev->vhost_hlen;
	uint16_t num_buffers = 0;
	uint32_t buffer_len[vq->size];
	uint16_t buffer_buf_id[vq->size];
	uint16_t buffer_desc_count[vq->size];

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * if we tried all available ring items, and still
		 * can't get enough buf, it means something abnormal
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &nr_vec,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		size -= len;

		buffer_len[num_buffers] = len;
		buffer_buf_id[num_buffers] = buf_id;
		buffer_desc_count[num_buffers] = desc_count;
		num_buffers += 1;

		*nr_descs += desc_count;
		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;
	}

	if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
		return -1;

	vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
					   buffer_desc_count, num_buffers);

	return 0;
}
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DATA(DEBUG,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return pkt_idx;
}
static __rte_always_inline int
virtio_dev_rx_batch_packed(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mbuf **pkts)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t avail_idx = vq->last_avail_idx;
	uint64_t desc_addrs[PACKED_BATCH_SIZE];
	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
	uint32_t buf_offset = dev->vhost_hlen;
	uint64_t lens[PACKED_BATCH_SIZE];
	uint16_t ids[PACKED_BATCH_SIZE];
	uint16_t i;

	if (unlikely(avail_idx & PACKED_BATCH_MASK))
		return -1;

	if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(pkts[i]->next != NULL))
			return -1;
		if (unlikely(!desc_is_avail(&descs[avail_idx + i],
					    wrap_counter)))
			return -1;
	}

	rte_smp_rmb();

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		lens[i] = descs[avail_idx + i].len;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		desc_addrs[i] = vhost_iova_to_vva(dev, vq,
						  descs[avail_idx + i].addr,
						  &lens[i],
						  VHOST_ACCESS_RW);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(!desc_addrs[i]))
			return -1;
		if (unlikely(lens[i] != descs[avail_idx + i].len))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
		hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
					(uintptr_t)desc_addrs[i];
		lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
			   rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
			   pkts[i]->pkt_len);
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
					   lens[i]);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		ids[i] = descs[avail_idx + i].id;

	vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);

	return 0;
}
static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mbuf *pkt)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t nr_descs = 0;

	rte_smp_rmb();
	if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
						 &nr_descs) < 0)) {
		VHOST_LOG_DATA(DEBUG,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
		return -1;
	}

	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + nr_descs);

	vq_inc_last_avail_packed(vq, nr_descs);

	return 0;
}
static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
		     struct vhost_virtqueue *vq,
		     struct rte_mbuf **pkts,
		     uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;

	do {
		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtio_dev_rx_batch_packed(dev, vq,
							&pkts[pkt_idx])) {
				pkt_idx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}

		if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
			break;
		pkt_idx++;
		remained--;

	} while (pkt_idx < count);

	if (vq->shadow_used_idx) {
		do_data_copy_enqueue(dev, vq);
		vhost_flush_enqueue_shadow_packed(dev, vq);
	}

	if (pkt_idx)
		vhost_vring_call_packed(dev, vq);

	return pkt_idx;
}
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	uint32_t nb_tx = 0;

	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(vq->enabled == 0))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
	else
		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return nb_tx;
}
uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		VHOST_LOG_DATA(ERR,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	return virtio_dev_rx(dev, queue_id, pkts, count);
}
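/*
 * Illustrative only: a minimal forwarding step a switching application
 * might build around rte_vhost_enqueue_burst(). "vid", "port" and
 * VIRTIO_RXQ are hypothetical names from the caller's context; error
 * handling is elided. Note the burst is copied into guest buffers, so
 * the caller still owns (and must free) every mbuf afterwards:
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port, 0, pkts, MAX_PKT_BURST);
 *	rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, nb_rx);
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */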
static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
		((1ULL << VIRTIO_NET_F_CSUM) |
		 (1ULL << VIRTIO_NET_F_HOST_ECN) |
		 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
		 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
		 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}
static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	void *l3_hdr = NULL;
	struct rte_ether_hdr *eth_hdr;
	uint16_t ethertype;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	m->l2_len = sizeof(struct rte_ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == RTE_ETHER_TYPE_VLAN) {
		struct rte_vlan_hdr *vlan_hdr =
			(struct rte_vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct rte_vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	l3_hdr = (char *)eth_hdr + m->l2_len;

	switch (ethertype) {
	case RTE_ETHER_TYPE_IPV4:
		ipv4_hdr = l3_hdr;
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case RTE_ETHER_TYPE_IPV6:
		ipv6_hdr = l3_hdr;
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct rte_ipv6_hdr);
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		*l4_hdr = NULL;
		break;
	}
}
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
	uint16_t l4_proto = 0;
	void *l4_hdr = NULL;
	struct rte_tcp_hdr *tcp_hdr = NULL;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	parse_ethernet(m, &l4_proto, &l4_hdr);
	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct rte_tcp_hdr, cksum)):
				if (l4_proto == IPPROTO_TCP)
					m->ol_flags |= PKT_TX_TCP_CKSUM;
				break;
			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
				if (l4_proto == IPPROTO_UDP)
					m->ol_flags |= PKT_TX_UDP_CKSUM;
				break;
			case (offsetof(struct rte_sctp_hdr, cksum)):
				if (l4_proto == IPPROTO_SCTP)
					m->ol_flags |= PKT_TX_SCTP_CKSUM;
				break;
			default:
				break;
			}
		}
	}

	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			tcp_hdr = l4_hdr;
			m->ol_flags |= PKT_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			m->ol_flags |= PKT_TX_UDP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = sizeof(struct rte_udp_hdr);
			break;
		default:
			VHOST_LOG_DATA(WARNING,
				"unsupported gso type %u.\n", hdr->gso_type);
			break;
		}
	}
}
static __rte_noinline void
copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
		struct buf_vector *buf_vec)
{
	uint64_t len;
	uint64_t remain = sizeof(struct virtio_net_hdr);
	uint64_t src;
	uint64_t dst = (uint64_t)(uintptr_t)hdr;

	while (remain) {
		len = RTE_MIN(remain, buf_vec->buf_len);
		src = buf_vec->buf_addr;
		rte_memcpy((void *)(uintptr_t)dst,
				(void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		buf_vec++;
	}
}
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct buf_vector *buf_vec, uint16_t nr_vec,
		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
	uint32_t buf_avail, buf_offset;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr tmp_hdr;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid desc dead loop chain */
	uint16_t vec_idx = 0;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	int error = 0;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	if (virtio_net_with_host_offload(dev)) {
		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
			/*
			 * No luck, the virtio-net header doesn't fit
			 * in a contiguous virtual area.
			 */
			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
			hdr = &tmp_hdr;
		} else {
			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
		}
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail  = buf_len - buf_offset;
	} else if (buf_len == dev->vhost_hlen) {
		if (unlikely(++vec_idx >= nr_vec))
			goto out;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;

		buf_offset = 0;
		buf_avail = buf_len;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	}

	PRINT_PACKET(dev,
			(uintptr_t)(buf_addr + buf_offset),
			(uint32_t)buf_avail, 0);

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		uint64_t hpa;

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		/*
		 * A desc buf might span two host physical pages that are
		 * not contiguous. In such a case (gpa_to_hpa returns 0),
		 * data will be copied even though zero copy is enabled.
		 */
		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
					buf_iova + buf_offset, cpy_len)))) {
			cur->data_len = cpy_len;
			cur->data_off = 0;
			cur->buf_addr =
				(void *)(uintptr_t)(buf_addr + buf_offset);
			cur->buf_iova = hpa;

			/*
			 * In zero copy mode, one mbuf can only reference data
			 * for one or partial of one desc buff.
			 */
			mbuf_avail = cpy_len;
		} else {
			if (likely(cpy_len > MAX_BATCH_LEN ||
				   vq->batch_copy_nb_elems >= vq->size ||
				   (hdr && cur == m))) {
				rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
								   mbuf_offset),
					   (void *)((uintptr_t)(buf_addr +
							   buf_offset)),
					   cpy_len);
			} else {
				batch_copy[vq->batch_copy_nb_elems].dst =
					rte_pktmbuf_mtod_offset(cur, void *,
								mbuf_offset);
				batch_copy[vq->batch_copy_nb_elems].src =
					(void *)((uintptr_t)(buf_addr +
								buf_offset));
				batch_copy[vq->batch_copy_nb_elems].len =
					cpy_len;
				vq->batch_copy_nb_elems++;
			}
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;

		/* This buf reaches its end, get the next one */
		if (buf_avail == 0) {
			if (++vec_idx >= nr_vec)
				break;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			buf_offset = 0;
			buf_avail  = buf_len;

			PRINT_PACKET(dev, (uintptr_t)buf_addr,
					(uint32_t)buf_avail, 0);
		}

		/*
		 * This mbuf reaches its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				VHOST_LOG_DATA(ERR, "Failed to "
					"allocate memory for mbuf.\n");
				error = -1;
				goto out;
			}
			if (unlikely(dev->dequeue_zero_copy))
				rte_mbuf_refcnt_update(cur, 1);

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(hdr, m);

out:

	return error;
}
static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;

	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;

again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}

	tries++;
	if (tries == 1) {
		/* search [0, last_zmbuf_idx) */
		i = 0;
		last = vq->last_zmbuf_idx;
		goto again;
	}

	return NULL;
}
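/*
 * Zero-copy dequeue keeps the guest buffer alive by pinning it behind a
 * zcopy_mbuf entry: the used ring is only updated (see free_zmbuf() and
 * the zmbuf scan in virtio_dev_tx_split()) once the external consumer has
 * freed the mbuf, i.e. once mbuf_is_consumed() reports the host is the
 * last user.
 */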
static void
virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
{
	rte_free(opaque);
}
static int
virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
{
	struct rte_mbuf_ext_shared_info *shinfo = NULL;
	uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
	uint16_t buf_len;
	rte_iova_t iova;
	void *buf;

	/* Try to use pkt buffer to store shinfo to reduce the amount of memory
	 * required, otherwise store shinfo in the new buffer.
	 */
	if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
		shinfo = rte_pktmbuf_mtod(pkt,
					  struct rte_mbuf_ext_shared_info *);
	else {
		total_len += sizeof(*shinfo) + sizeof(uintptr_t);
		total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
	}

	if (unlikely(total_len > UINT16_MAX))
		return -ENOSPC;

	buf_len = total_len;
	buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	/* Initialize shinfo */
	if (shinfo) {
		shinfo->free_cb = virtio_dev_extbuf_free;
		shinfo->fcb_opaque = buf;
		rte_mbuf_ext_refcnt_set(shinfo, 1);
	} else {
		shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
					      virtio_dev_extbuf_free, buf);
		if (unlikely(shinfo == NULL)) {
			rte_free(buf);
			VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
			return -1;
		}
	}

	iova = rte_malloc_virt2iova(buf);
	rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
	rte_pktmbuf_reset_headroom(pkt);

	return 0;
}
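/*
 * The allocator below tries, in order: the mempool buffer's own tailroom,
 * an attached external buffer (when the device was registered with
 * RTE_VHOST_USER_EXTBUF_SUPPORT, which sets dev->extbuf), and finally a
 * chained mbuf unless the device demands linear buffers, in which case
 * the packet is dropped.
 */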
/*
 * Allocate a host supported pktmbuf.
 */
static __rte_always_inline struct rte_mbuf *
virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
			 uint32_t data_len)
{
	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);

	if (unlikely(pkt == NULL)) {
		VHOST_LOG_DATA(ERR,
			"Failed to allocate memory for mbuf.\n");
		return NULL;
	}

	if (rte_pktmbuf_tailroom(pkt) >= data_len)
		return pkt;

	/* attach an external buffer if supported */
	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
		return pkt;

	/* check if chained buffers are allowed */
	if (!dev->linearbuf)
		return pkt;

	/* Data doesn't fit into the buffer and the host supports
	 * only linear buffers
	 */
	rte_pktmbuf_free(pkt);

	return NULL;
}
static __rte_noinline uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t i;
	uint16_t free_entries;
	uint16_t dropped = 0;
	static bool allocerr_warned;

	if (unlikely(dev->dequeue_zero_copy)) {
		struct zcopy_mbuf *zmbuf, *next;

		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
		     zmbuf != NULL; zmbuf = next) {
			next = TAILQ_NEXT(zmbuf, next);

			if (mbuf_is_consumed(zmbuf->mbuf)) {
				update_shadow_used_ring_split(vq,
						zmbuf->desc_idx, 0);
				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
				restore_mbuf(zmbuf->mbuf);
				rte_pktmbuf_free(zmbuf->mbuf);
				put_zmbuf(zmbuf);
				vq->nr_zmbuf -= 1;
			}
		}

		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	/*
	 * The ordering between avail index and
	 * desc reads needs to be enforced.
	 */
	free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
			vq->last_avail_idx;
	if (free_entries == 0)
		return 0;

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);
	VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
			dev->vid, count);

	for (i = 0; i < count; i++) {
		struct buf_vector buf_vec[BUF_VECTOR_MAX];
		uint16_t head_idx;
		uint32_t buf_len;
		uint16_t nr_vec = 0;
		int err;

		if (unlikely(fill_vec_buf_split(dev, vq,
						vq->last_avail_idx + i,
						&nr_vec, buf_vec,
						&head_idx, &buf_len,
						VHOST_ACCESS_RO) < 0))
			break;

		if (likely(dev->dequeue_zero_copy == 0))
			update_shadow_used_ring_split(vq, head_idx, 0);

		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
		if (unlikely(pkts[i] == NULL)) {
			/*
			 * mbuf allocation fails for jumbo packets when external
			 * buffer allocation is not allowed and linear buffer
			 * is required. Drop this packet.
			 */
			if (!allocerr_warned) {
				VHOST_LOG_DATA(ERR,
					"Failed mbuf alloc of size %d from %s on %s.\n",
					buf_len, mbuf_pool->name, dev->ifname);
				allocerr_warned = true;
			}
			dropped += 1;
			i++;
			break;
		}

		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
				mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			if (!allocerr_warned) {
				VHOST_LOG_DATA(ERR,
					"Failed to copy desc to mbuf on %s.\n",
					dev->ifname);
				allocerr_warned = true;
			}
			dropped += 1;
			i++;
			break;
		}

		if (unlikely(dev->dequeue_zero_copy)) {
			struct zcopy_mbuf *zmbuf;

			zmbuf = get_zmbuf(vq);
			if (!zmbuf) {
				rte_pktmbuf_free(pkts[i]);
				dropped += 1;
				i++;
				break;
			}
			zmbuf->mbuf = pkts[i];
			zmbuf->desc_idx = head_idx;

			/*
			 * Pin lock the mbuf; we will check later to see
			 * whether the mbuf is freed (when we are the last
			 * user) or not. If that's the case, we then could
			 * update the used ring safely.
			 */
			rte_mbuf_refcnt_update(pkts[i], 1);

			vq->nr_zmbuf += 1;
			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
		}
	}
	vq->last_avail_idx += i;

	if (likely(dev->dequeue_zero_copy == 0)) {
		do_data_copy_dequeue(vq);
		if (unlikely(i < count))
			vq->shadow_used_idx = i;
		if (likely(vq->shadow_used_idx)) {
			flush_shadow_used_ring_split(dev, vq);
			vhost_vring_call_split(dev, vq);
		}
	}

	return (i - dropped);
}
static __rte_always_inline int
vhost_reserve_avail_batch_packed(struct virtio_net *dev,
				 struct vhost_virtqueue *vq,
				 struct rte_mempool *mbuf_pool,
				 struct rte_mbuf **pkts,
				 uint16_t avail_idx,
				 uintptr_t *desc_addrs,
				 uint16_t *ids)
{
	bool wrap = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	struct virtio_net_hdr *hdr;
	uint64_t lens[PACKED_BATCH_SIZE];
	uint64_t buf_lens[PACKED_BATCH_SIZE];
	uint32_t buf_offset = dev->vhost_hlen;
	uint16_t flags, i;

	if (unlikely(avail_idx & PACKED_BATCH_MASK))
		return -1;
	if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		flags = descs[avail_idx + i].flags;
		if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
			     (wrap == !!(flags & VRING_DESC_F_USED))  ||
			     (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
			return -1;
	}

	rte_smp_rmb();

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		lens[i] = descs[avail_idx + i].len;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		desc_addrs[i] = vhost_iova_to_vva(dev, vq,
						  descs[avail_idx + i].addr,
						  &lens[i], VHOST_ACCESS_RW);
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(!desc_addrs[i]))
			return -1;
		if (unlikely((lens[i] != descs[avail_idx + i].len)))
			return -1;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
		if (!pkts[i])
			goto free_buf;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
			goto free_buf;
	}

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
		pkts[i]->data_len = pkts[i]->pkt_len;
		ids[i] = descs[avail_idx + i].id;
	}

	if (virtio_net_with_host_offload(dev)) {
		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
			vhost_dequeue_offload(hdr, pkts[i]);
		}
	}

	return 0;

free_buf:
	for (i = 0; i < PACKED_BATCH_SIZE; i++)
		rte_pktmbuf_free(pkts[i]);

	return -1;
}
static __rte_always_inline int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mempool *mbuf_pool,
			   struct rte_mbuf **pkts)
{
	uint16_t avail_idx = vq->last_avail_idx;
	uint32_t buf_offset = dev->vhost_hlen;
	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
	uint16_t ids[PACKED_BATCH_SIZE];
	uint16_t i;

	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
					     avail_idx, desc_addrs, ids))
		return -1;

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);

	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
			   (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
			   pkts[i]->pkt_len);

	if (virtio_net_is_inorder(dev))
		vhost_shadow_dequeue_batch_packed_inorder(vq,
			ids[PACKED_BATCH_SIZE - 1]);
	else
		vhost_shadow_dequeue_batch_packed(dev, vq, ids);

	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

	return 0;
}
static __rte_always_inline int
vhost_dequeue_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mempool *mbuf_pool,
			    struct rte_mbuf **pkts,
			    uint16_t *buf_id,
			    uint16_t *desc_count)
{
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint32_t buf_len;
	uint16_t nr_vec = 0;
	int err;
	static bool allocerr_warned;

	if (unlikely(fill_vec_buf_packed(dev, vq,
					 vq->last_avail_idx, desc_count,
					 buf_vec, &nr_vec,
					 buf_id, &buf_len,
					 VHOST_ACCESS_RO) < 0))
		return -1;

	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
	if (unlikely(*pkts == NULL)) {
		if (!allocerr_warned) {
			VHOST_LOG_DATA(ERR,
				"Failed mbuf alloc of size %d from %s on %s.\n",
				buf_len, mbuf_pool->name, dev->ifname);
			allocerr_warned = true;
		}
		return -1;
	}

	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
				mbuf_pool);
	if (unlikely(err)) {
		if (!allocerr_warned) {
			VHOST_LOG_DATA(ERR,
				"Failed to copy desc to mbuf on %s.\n",
				dev->ifname);
			allocerr_warned = true;
		}
		rte_pktmbuf_free(*pkts);
		return -1;
	}

	return 0;
}
static __rte_always_inline int
virtio_dev_tx_single_packed(struct virtio_net *dev,
			    struct vhost_virtqueue *vq,
			    struct rte_mempool *mbuf_pool,
			    struct rte_mbuf **pkts)
{
	uint16_t buf_id, desc_count = 0;
	int ret;

	ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
					  &desc_count);

	if (likely(desc_count > 0)) {
		if (virtio_net_is_inorder(dev))
			vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
								   desc_count);
		else
			vhost_shadow_dequeue_single_packed(vq, buf_id,
							   desc_count);

		vq_inc_last_avail_packed(vq, desc_count);
	}

	return ret;
}
int
2006 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net
*dev
,
2007 struct vhost_virtqueue
*vq
,
2008 struct rte_mempool
*mbuf_pool
,
2009 struct rte_mbuf
**pkts
)
2011 struct zcopy_mbuf
*zmbufs
[PACKED_BATCH_SIZE
];
2012 uintptr_t desc_addrs
[PACKED_BATCH_SIZE
];
2013 uint16_t ids
[PACKED_BATCH_SIZE
];
2016 uint16_t avail_idx
= vq
->last_avail_idx
;
2018 if (vhost_reserve_avail_batch_packed(dev
, vq
, mbuf_pool
, pkts
,
2019 avail_idx
, desc_addrs
, ids
))
2022 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
)
2023 zmbufs
[i
] = get_zmbuf(vq
);
2025 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
) {
2030 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
) {
2031 zmbufs
[i
]->mbuf
= pkts
[i
];
2032 zmbufs
[i
]->desc_idx
= ids
[i
];
2033 zmbufs
[i
]->desc_count
= 1;
2036 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
)
2037 rte_mbuf_refcnt_update(pkts
[i
], 1);
2039 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
)
2040 TAILQ_INSERT_TAIL(&vq
->zmbuf_list
, zmbufs
[i
], next
);
2042 vq
->nr_zmbuf
+= PACKED_BATCH_SIZE
;
2043 vq_inc_last_avail_packed(vq
, PACKED_BATCH_SIZE
);
2048 vhost_for_each_try_unroll(i
, 0, PACKED_BATCH_SIZE
)
2049 rte_pktmbuf_free(pkts
[i
]);
static __rte_always_inline int
virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
				  struct vhost_virtqueue *vq,
				  struct rte_mempool *mbuf_pool,
				  struct rte_mbuf **pkts)
{
	uint16_t buf_id, desc_count;
	struct zcopy_mbuf *zmbuf;

	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
					&desc_count))
		return -1;

	zmbuf = get_zmbuf(vq);
	if (!zmbuf) {
		rte_pktmbuf_free(*pkts);
		return -1;
	}
	zmbuf->mbuf = *pkts;
	zmbuf->desc_idx = buf_id;
	zmbuf->desc_count = desc_count;

	rte_mbuf_refcnt_update(*pkts, 1);

	vq->nr_zmbuf += 1;
	TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);

	vq_inc_last_avail_packed(vq, desc_count);

	return 0;
}
static __rte_always_inline void
free_zmbuf(struct vhost_virtqueue *vq)
{
	struct zcopy_mbuf *next = NULL;
	struct zcopy_mbuf *zmbuf;

	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
	     zmbuf != NULL; zmbuf = next) {
		next = TAILQ_NEXT(zmbuf, next);

		uint16_t last_used_idx = vq->last_used_idx;

		if (mbuf_is_consumed(zmbuf->mbuf)) {
			uint16_t flags;
			flags = vq->desc_packed[last_used_idx].flags;
			if (vq->used_wrap_counter) {
				flags |= VRING_DESC_F_USED;
				flags |= VRING_DESC_F_AVAIL;
			} else {
				flags &= ~VRING_DESC_F_USED;
				flags &= ~VRING_DESC_F_AVAIL;
			}

			vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
			vq->desc_packed[last_used_idx].len = 0;

			rte_smp_wmb();
			vq->desc_packed[last_used_idx].flags = flags;

			vq_inc_last_used_packed(vq, zmbuf->desc_count);

			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
			restore_mbuf(zmbuf->mbuf);
			rte_pktmbuf_free(zmbuf->mbuf);
			put_zmbuf(zmbuf);
			vq->nr_zmbuf -= 1;
		}
	}
}
static __rte_noinline uint16_t
virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
			   struct vhost_virtqueue *vq,
			   struct rte_mempool *mbuf_pool,
			   struct rte_mbuf **pkts,
			   uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;

	free_zmbuf(vq);

	do {
		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
				mbuf_pool, &pkts[pkt_idx])) {
				pkt_idx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}

		if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
						      &pkts[pkt_idx]))
			break;
		pkt_idx++;
		remained--;

	} while (remained);

	if (pkt_idx)
		vhost_vring_call_packed(dev, vq);

	return pkt_idx;
}
static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
		     struct vhost_virtqueue *vq,
		     struct rte_mempool *mbuf_pool,
		     struct rte_mbuf **pkts,
		     uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;

	do {
		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
							&pkts[pkt_idx])) {
				pkt_idx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}

		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
						&pkts[pkt_idx]))
			break;
		pkt_idx++;
		remained--;

	} while (remained);

	if (vq->shadow_used_idx) {
		do_data_copy_dequeue(vq);

		vhost_flush_dequeue_shadow_packed(dev, vq);
		vhost_vring_call_packed(dev, vq);
	}

	return pkt_idx;
}
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev;
	struct rte_mbuf *rarp_mbuf = NULL;
	struct vhost_virtqueue *vq;
	int16_t success = 1;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		VHOST_LOG_DATA(ERR,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
		VHOST_LOG_DATA(ERR,
			"(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
		return 0;

	if (unlikely(vq->enabled == 0)) {
		count = 0;
		goto out_access_unlock;
	}

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0)) {
			count = 0;
			goto out;
		}

	/*
	 * Construct a RARP broadcast packet, and inject it to the "pkts"
	 * array, so that it looks like the guest actually sent such a packet.
	 *
	 * Check user_send_rarp() for more information.
	 *
	 * broadcast_rarp shares a cacheline in the virtio_net structure
	 * with some fields that are accessed during enqueue and
	 * __atomic_compare_exchange_n causes a write if performed compare
	 * and exchange. This could result in false sharing between enqueue
	 * and dequeue.
	 *
	 * Prevent unnecessary false sharing by reading broadcast_rarp first
	 * and only performing compare and exchange if the read indicates it
	 * is likely to be set.
	 */
	if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
			__atomic_compare_exchange_n(&dev->broadcast_rarp,
			&success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {

		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
		if (rarp_mbuf == NULL) {
			VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
			count = 0;
			goto out;
		}
		count -= 1;
	}

	if (vq_is_packed(dev)) {
		if (unlikely(dev->dequeue_zero_copy))
			count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
							   pkts, count);
		else
			count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
						     count);
	} else
		count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	if (unlikely(rarp_mbuf != NULL)) {
		/*
		 * Inject it to the head of "pkts" array, so that switch's mac
		 * learning table will get updated first.
		 */
		memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
		pkts[0] = rarp_mbuf;
		count += 1;
	}

	return count;
}
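/*
 * Illustrative only: the dequeue-side counterpart of the enqueue sketch
 * above, with hypothetical "vid", "pool", "port" and VIRTIO_TXQ names.
 * When a RARP announcement is pending, one slot of "pkts" is consumed by
 * the injected packet at index 0.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_deq, nb_tx, i;
 *
 *	nb_deq = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, pool, pkts,
 *			MAX_PKT_BURST);
 *	nb_tx = rte_eth_tx_burst(port, 0, pkts, nb_deq);
 *	for (i = nb_tx; i < nb_deq; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */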