1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31 return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37 return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
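/*
 * Illustrative sketch (not part of the original file): the parity check in
 * is_valid_virt_queue_idx() encodes the virtio-net queue layout, where even
 * virtqueue indices are guest RX queues (the host enqueue path) and odd
 * indices are guest TX queues (the host dequeue path).  Assuming a device
 * with nr_vring == 4:
 *
 *	is_valid_virt_queue_idx(0, 0, 4) -> true   enqueue on RX queue 0
 *	is_valid_virt_queue_idx(1, 0, 4) -> false  queue 1 is a TX queue
 *	is_valid_virt_queue_idx(1, 1, 4) -> true   dequeue on TX queue 1
 *	is_valid_virt_queue_idx(5, 1, 4) -> false  index beyond nr_vring
 */
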
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42 struct vhost_virtqueue *vq,
43 uint16_t to, uint16_t from, uint16_t size)
44 {
45 rte_memcpy(&vq->used->ring[to],
46 &vq->shadow_used_split[from],
47 size * sizeof(struct vring_used_elem));
48 vhost_log_cache_used_vring(dev, vq,
49 offsetof(struct vring_used, ring[to]),
50 size * sizeof(struct vring_used_elem));
51 }
52
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56 uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58 if (used_idx + vq->shadow_used_idx <= vq->size) {
59 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60 vq->shadow_used_idx);
61 } else {
62 uint16_t size;
63
64 /* update used ring interval [used_idx, vq->size) */
65 size = vq->size - used_idx;
66 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68 /* update the remaining used ring interval [0, shadow_used_idx - size) */
69 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70 vq->shadow_used_idx - size);
71 }
72 vq->last_used_idx += vq->shadow_used_idx;
73
74 rte_smp_wmb();
75
76 vhost_log_cache_sync(dev, vq);
77
78 *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79 vq->shadow_used_idx = 0;
80 vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81 sizeof(vq->used->idx));
82 }
83
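/*
 * Illustrative worked example (not part of the original file), assuming
 * vq->size == 256, last_used_idx == 250 and shadow_used_idx == 10: the
 * masked used_idx is 250 and 250 + 10 > 256, so the flush above is split
 * into two copies: shadow entries [0, 6) go to used ring slots [250, 256)
 * and the remaining entries [6, 10) go to slots [0, 4), before the used
 * index itself is advanced by 10.
 */
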
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86 uint16_t desc_idx, uint32_t len)
87 {
88 uint16_t i = vq->shadow_used_idx++;
89
90 vq->shadow_used_split[i].id = desc_idx;
91 vq->shadow_used_split[i].len = len;
92 }
93
94 static __rte_always_inline void
95 flush_shadow_used_ring_packed(struct virtio_net *dev,
96 struct vhost_virtqueue *vq)
97 {
98 int i;
99 uint16_t used_idx = vq->last_used_idx;
100 uint16_t head_idx = vq->last_used_idx;
101 uint16_t head_flags = 0;
102
103 /* Split loop in two to save memory barriers */
104 for (i = 0; i < vq->shadow_used_idx; i++) {
105 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108 used_idx += vq->shadow_used_packed[i].count;
109 if (used_idx >= vq->size)
110 used_idx -= vq->size;
111 }
112
113 rte_smp_wmb();
114
115 for (i = 0; i < vq->shadow_used_idx; i++) {
116 uint16_t flags;
117
118 if (vq->shadow_used_packed[i].len)
119 flags = VRING_DESC_F_WRITE;
120 else
121 flags = 0;
122
123 if (vq->used_wrap_counter) {
124 flags |= VRING_DESC_F_USED;
125 flags |= VRING_DESC_F_AVAIL;
126 } else {
127 flags &= ~VRING_DESC_F_USED;
128 flags &= ~VRING_DESC_F_AVAIL;
129 }
130
131 if (i > 0) {
132 vq->desc_packed[vq->last_used_idx].flags = flags;
133
134 vhost_log_cache_used_vring(dev, vq,
135 vq->last_used_idx *
136 sizeof(struct vring_packed_desc),
137 sizeof(struct vring_packed_desc));
138 } else {
139 head_idx = vq->last_used_idx;
140 head_flags = flags;
141 }
142
143 vq->last_used_idx += vq->shadow_used_packed[i].count;
144 if (vq->last_used_idx >= vq->size) {
145 vq->used_wrap_counter ^= 1;
146 vq->last_used_idx -= vq->size;
147 }
148 }
149
150 vq->desc_packed[head_idx].flags = head_flags;
151
152 vhost_log_cache_used_vring(dev, vq,
153 head_idx *
154 sizeof(struct vring_packed_desc),
155 sizeof(struct vring_packed_desc));
156
157 vq->shadow_used_idx = 0;
158 vhost_log_cache_sync(dev, vq);
159 }
160
161 static __rte_always_inline void
162 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
163 uint16_t desc_idx, uint32_t len, uint16_t count)
164 {
165 uint16_t i = vq->shadow_used_idx++;
166
167 vq->shadow_used_packed[i].id = desc_idx;
168 vq->shadow_used_packed[i].len = len;
169 vq->shadow_used_packed[i].count = count;
170 }
171
172 static inline void
173 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
174 {
175 struct batch_copy_elem *elem = vq->batch_copy_elems;
176 uint16_t count = vq->batch_copy_nb_elems;
177 int i;
178
179 for (i = 0; i < count; i++) {
180 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
181 vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
182 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
183 }
184
185 vq->batch_copy_nb_elems = 0;
186 }
187
188 static inline void
189 do_data_copy_dequeue(struct vhost_virtqueue *vq)
190 {
191 struct batch_copy_elem *elem = vq->batch_copy_elems;
192 uint16_t count = vq->batch_copy_nb_elems;
193 int i;
194
195 for (i = 0; i < count; i++)
196 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
197
198 vq->batch_copy_nb_elems = 0;
199 }
200
201 /* skip the write when the value is unchanged, to lessen cache pressure */
202 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
203 if ((var) != (val)) \
204 (var) = (val); \
205 } while (0)
206
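/*
 * Illustrative usage (not part of the original file): a call such as
 *
 *	ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
 *
 * performs the store only when gso_type is not already 0, so repeatedly
 * clearing an already-clean virtio-net header does not dirty the
 * guest-visible cache line on every packet.
 */
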
207 static __rte_always_inline void
208 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
209 {
210 uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
211
212 if (m_buf->ol_flags & PKT_TX_TCP_SEG)
213 csum_l4 |= PKT_TX_TCP_CKSUM;
214
215 if (csum_l4) {
216 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
217 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
218
219 switch (csum_l4) {
220 case PKT_TX_TCP_CKSUM:
221 net_hdr->csum_offset = (offsetof(struct tcp_hdr,
222 cksum));
223 break;
224 case PKT_TX_UDP_CKSUM:
225 net_hdr->csum_offset = (offsetof(struct udp_hdr,
226 dgram_cksum));
227 break;
228 case PKT_TX_SCTP_CKSUM:
229 net_hdr->csum_offset = (offsetof(struct sctp_hdr,
230 cksum));
231 break;
232 }
233 } else {
234 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
235 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
236 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
237 }
238
239 /* The virtio-net header has no IP cksum offload flag, so calculate it here */
240 if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
241 struct ipv4_hdr *ipv4_hdr;
242
243 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
244 m_buf->l2_len);
245 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
246 }
247
248 if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
249 if (m_buf->ol_flags & PKT_TX_IPV4)
250 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
251 else
252 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
253 net_hdr->gso_size = m_buf->tso_segsz;
254 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
255 + m_buf->l4_len;
256 } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
257 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
258 net_hdr->gso_size = m_buf->tso_segsz;
259 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
260 m_buf->l4_len;
261 } else {
262 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
263 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
264 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
265 }
266 }
267
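/*
 * Illustrative worked example (not part of the original file): for an mbuf
 * carrying PKT_TX_IPV4 | PKT_TX_TCP_CKSUM with l2_len == 14 and
 * l3_len == 20, virtio_enqueue_offload() produces
 *
 *	net_hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
 *	net_hdr->csum_start  = 34   (l2_len + l3_len)
 *	net_hdr->csum_offset = offsetof(struct tcp_hdr, cksum), i.e. 16
 *
 * telling the other side that the TCP checksum still needs to be completed
 * starting at byte 34 of the packet; the GSO fields stay cleared since
 * PKT_TX_TCP_SEG is unset.
 */
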
268 static __rte_always_inline int
269 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
270 struct buf_vector *buf_vec, uint16_t *vec_idx,
271 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
272 {
273 uint16_t vec_id = *vec_idx;
274
275 while (desc_len) {
276 uint64_t desc_addr;
277 uint64_t desc_chunck_len = desc_len;
278
279 if (unlikely(vec_id >= BUF_VECTOR_MAX))
280 return -1;
281
282 desc_addr = vhost_iova_to_vva(dev, vq,
283 desc_iova,
284 &desc_chunck_len,
285 perm);
286 if (unlikely(!desc_addr))
287 return -1;
288
289 buf_vec[vec_id].buf_iova = desc_iova;
290 buf_vec[vec_id].buf_addr = desc_addr;
291 buf_vec[vec_id].buf_len = desc_chunck_len;
292
293 desc_len -= desc_chunck_len;
294 desc_iova += desc_chunck_len;
295 vec_id++;
296 }
297 *vec_idx = vec_id;
298
299 return 0;
300 }
301
302 static __rte_always_inline int
303 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
304 uint32_t avail_idx, uint16_t *vec_idx,
305 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
306 uint32_t *desc_chain_len, uint8_t perm)
307 {
308 uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
309 uint16_t vec_id = *vec_idx;
310 uint32_t len = 0;
311 uint64_t dlen;
312 uint32_t nr_descs = vq->size;
313 uint32_t cnt = 0;
314 struct vring_desc *descs = vq->desc;
315 struct vring_desc *idesc = NULL;
316
317 if (unlikely(idx >= vq->size))
318 return -1;
319
320 *desc_chain_head = idx;
321
322 if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
323 dlen = vq->desc[idx].len;
324 nr_descs = dlen / sizeof(struct vring_desc);
325 if (unlikely(nr_descs > vq->size))
326 return -1;
327
328 descs = (struct vring_desc *)(uintptr_t)
329 vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
330 &dlen,
331 VHOST_ACCESS_RO);
332 if (unlikely(!descs))
333 return -1;
334
335 if (unlikely(dlen < vq->desc[idx].len)) {
336 /*
337 * The indirect desc table is not contiguous
338 * in the process VA space, so we have to copy it.
339 */
340 idesc = alloc_copy_ind_table(dev, vq,
341 vq->desc[idx].addr, vq->desc[idx].len);
342 if (unlikely(!idesc))
343 return -1;
344
345 descs = idesc;
346 }
347
348 idx = 0;
349 }
350
351 while (1) {
352 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
353 free_ind_table(idesc);
354 return -1;
355 }
356
357 len += descs[idx].len;
358
359 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
360 descs[idx].addr, descs[idx].len,
361 perm))) {
362 free_ind_table(idesc);
363 return -1;
364 }
365
366 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
367 break;
368
369 idx = descs[idx].next;
370 }
371
372 *desc_chain_len = len;
373 *vec_idx = vec_id;
374
375 if (unlikely(!!idesc))
376 free_ind_table(idesc);
377
378 return 0;
379 }
380
381 /*
382 * Returns -1 on fail, 0 on success
383 */
384 static inline int
385 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
386 uint32_t size, struct buf_vector *buf_vec,
387 uint16_t *num_buffers, uint16_t avail_head,
388 uint16_t *nr_vec)
389 {
390 uint16_t cur_idx;
391 uint16_t vec_idx = 0;
392 uint16_t max_tries, tries = 0;
393
394 uint16_t head_idx = 0;
395 uint32_t len = 0;
396
397 *num_buffers = 0;
398 cur_idx = vq->last_avail_idx;
399
400 if (rxvq_is_mergeable(dev))
401 max_tries = vq->size - 1;
402 else
403 max_tries = 1;
404
405 while (size > 0) {
406 if (unlikely(cur_idx == avail_head))
407 return -1;
408 /*
409 * If we have tried all available ring items and still
410 * can't get enough buffers, something abnormal has
411 * happened.
412 */
413 if (unlikely(++tries > max_tries))
414 return -1;
415
416 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
417 &vec_idx, buf_vec,
418 &head_idx, &len,
419 VHOST_ACCESS_RW) < 0))
420 return -1;
421 len = RTE_MIN(len, size);
422 update_shadow_used_ring_split(vq, head_idx, len);
423 size -= len;
424
425 cur_idx++;
426 *num_buffers += 1;
427 }
428
429 *nr_vec = vec_idx;
430
431 return 0;
432 }
433
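/*
 * Illustrative worked example (not part of the original file), assuming a
 * mergeable-rx virtqueue whose descriptor chains each cover 2048 bytes:
 * for size == 2060 (packet length plus vhost_hlen), the loop above first
 * reserves one chain and shrinks size to 12, then reserves a second chain
 * for the remaining 12 bytes, returning with *num_buffers == 2 and both
 * chains recorded in the shadow used ring.
 */
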
434 static __rte_always_inline int
435 fill_vec_buf_packed_indirect(struct virtio_net *dev,
436 struct vhost_virtqueue *vq,
437 struct vring_packed_desc *desc, uint16_t *vec_idx,
438 struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
439 {
440 uint16_t i;
441 uint32_t nr_descs;
442 uint16_t vec_id = *vec_idx;
443 uint64_t dlen;
444 struct vring_packed_desc *descs, *idescs = NULL;
445
446 dlen = desc->len;
447 descs = (struct vring_packed_desc *)(uintptr_t)
448 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
449 if (unlikely(!descs))
450 return -1;
451
452 if (unlikely(dlen < desc->len)) {
453 /*
454 * The indirect desc table is not contiguous
455 * in the process VA space, so we have to copy it.
456 */
457 idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
458 if (unlikely(!idescs))
459 return -1;
460
461 descs = idescs;
462 }
463
464 nr_descs = desc->len / sizeof(struct vring_packed_desc);
465 if (unlikely(nr_descs >= vq->size)) {
466 free_ind_table(idescs);
467 return -1;
468 }
469
470 for (i = 0; i < nr_descs; i++) {
471 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
472 free_ind_table(idescs);
473 return -1;
474 }
475
476 *len += descs[i].len;
477 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
478 descs[i].addr, descs[i].len,
479 perm)))
480 return -1;
481 }
482 *vec_idx = vec_id;
483
484 if (unlikely(!!idescs))
485 free_ind_table(idescs);
486
487 return 0;
488 }
489
490 static __rte_always_inline int
491 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
492 uint16_t avail_idx, uint16_t *desc_count,
493 struct buf_vector *buf_vec, uint16_t *vec_idx,
494 uint16_t *buf_id, uint32_t *len, uint8_t perm)
495 {
496 bool wrap_counter = vq->avail_wrap_counter;
497 struct vring_packed_desc *descs = vq->desc_packed;
498 uint16_t vec_id = *vec_idx;
499
500 if (avail_idx < vq->last_avail_idx)
501 wrap_counter ^= 1;
502
503 if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
504 return -1;
505
506 /*
507 * The ordering between desc flags and desc
508 * content reads needs to be enforced.
509 */
510 rte_smp_rmb();
511
512 *desc_count = 0;
513 *len = 0;
514
515 while (1) {
516 if (unlikely(vec_id >= BUF_VECTOR_MAX))
517 return -1;
518
519 if (unlikely(*desc_count >= vq->size))
520 return -1;
521
522 *desc_count += 1;
523 *buf_id = descs[avail_idx].id;
524
525 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
526 if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
527 &descs[avail_idx],
528 &vec_id, buf_vec,
529 len, perm) < 0))
530 return -1;
531 } else {
532 *len += descs[avail_idx].len;
533
534 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
535 descs[avail_idx].addr,
536 descs[avail_idx].len,
537 perm)))
538 return -1;
539 }
540
541 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
542 break;
543
544 if (++avail_idx >= vq->size) {
545 avail_idx -= vq->size;
546 wrap_counter ^= 1;
547 }
548 }
549
550 *vec_idx = vec_id;
551
552 return 0;
553 }
554
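/*
 * Illustrative sketch (not part of the original file, excluded from the
 * build): the availability test used above, expressed on raw descriptor
 * flags.  A packed descriptor is available to the device when its AVAIL
 * bit matches the driver's wrap counter while its USED bit does not; the
 * wrap counter flips each time the ring wraps, which is why
 * fill_vec_buf_packed() toggles wrap_counter when avail_idx < last_avail_idx.
 */
#if 0
static inline bool
packed_desc_is_avail_sketch(uint16_t flags, bool wrap_counter)
{
	bool avail = !!(flags & VRING_DESC_F_AVAIL);
	bool used  = !!(flags & VRING_DESC_F_USED);

	return avail == wrap_counter && used != wrap_counter;
}
#endif
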
555 /*
556 * Returns -1 on fail, 0 on success
557 */
558 static inline int
559 reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
560 uint32_t size, struct buf_vector *buf_vec,
561 uint16_t *nr_vec, uint16_t *num_buffers,
562 uint16_t *nr_descs)
563 {
564 uint16_t avail_idx;
565 uint16_t vec_idx = 0;
566 uint16_t max_tries, tries = 0;
567
568 uint16_t buf_id = 0;
569 uint32_t len = 0;
570 uint16_t desc_count;
571
572 *num_buffers = 0;
573 avail_idx = vq->last_avail_idx;
574
575 if (rxvq_is_mergeable(dev))
576 max_tries = vq->size - 1;
577 else
578 max_tries = 1;
579
580 while (size > 0) {
581 /*
582 * If we have tried all available ring items and still
583 * can't get enough buffers, something abnormal has
584 * happened.
585 */
586 if (unlikely(++tries > max_tries))
587 return -1;
588
589 if (unlikely(fill_vec_buf_packed(dev, vq,
590 avail_idx, &desc_count,
591 buf_vec, &vec_idx,
592 &buf_id, &len,
593 VHOST_ACCESS_RW) < 0))
594 return -1;
595
596 len = RTE_MIN(len, size);
597 update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
598 size -= len;
599
600 avail_idx += desc_count;
601 if (avail_idx >= vq->size)
602 avail_idx -= vq->size;
603
604 *nr_descs += desc_count;
605 *num_buffers += 1;
606 }
607
608 *nr_vec = vec_idx;
609
610 return 0;
611 }
612
613 static __rte_always_inline int
614 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
615 struct rte_mbuf *m, struct buf_vector *buf_vec,
616 uint16_t nr_vec, uint16_t num_buffers)
617 {
618 uint32_t vec_idx = 0;
619 uint32_t mbuf_offset, mbuf_avail;
620 uint32_t buf_offset, buf_avail;
621 uint64_t buf_addr, buf_iova, buf_len;
622 uint32_t cpy_len;
623 uint64_t hdr_addr;
624 struct rte_mbuf *hdr_mbuf;
625 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
626 struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
627 int error = 0;
628
629 if (unlikely(m == NULL)) {
630 error = -1;
631 goto out;
632 }
633
634 buf_addr = buf_vec[vec_idx].buf_addr;
635 buf_iova = buf_vec[vec_idx].buf_iova;
636 buf_len = buf_vec[vec_idx].buf_len;
637
638 if (nr_vec > 1)
639 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
640
641 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
642 error = -1;
643 goto out;
644 }
645
646 hdr_mbuf = m;
647 hdr_addr = buf_addr;
648 if (unlikely(buf_len < dev->vhost_hlen))
649 hdr = &tmp_hdr;
650 else
651 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
652
653 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
654 dev->vid, num_buffers);
655
656 if (unlikely(buf_len < dev->vhost_hlen)) {
657 buf_offset = dev->vhost_hlen - buf_len;
658 vec_idx++;
659 buf_addr = buf_vec[vec_idx].buf_addr;
660 buf_iova = buf_vec[vec_idx].buf_iova;
661 buf_len = buf_vec[vec_idx].buf_len;
662 buf_avail = buf_len - buf_offset;
663 } else {
664 buf_offset = dev->vhost_hlen;
665 buf_avail = buf_len - dev->vhost_hlen;
666 }
667
668 mbuf_avail = rte_pktmbuf_data_len(m);
669 mbuf_offset = 0;
670 while (mbuf_avail != 0 || m->next != NULL) {
671 /* done with current buf, get the next one */
672 if (buf_avail == 0) {
673 vec_idx++;
674 if (unlikely(vec_idx >= nr_vec)) {
675 error = -1;
676 goto out;
677 }
678
679 buf_addr = buf_vec[vec_idx].buf_addr;
680 buf_iova = buf_vec[vec_idx].buf_iova;
681 buf_len = buf_vec[vec_idx].buf_len;
682
683 /* Prefetch next buffer address. */
684 if (vec_idx + 1 < nr_vec)
685 rte_prefetch0((void *)(uintptr_t)
686 buf_vec[vec_idx + 1].buf_addr);
687 buf_offset = 0;
688 buf_avail = buf_len;
689 }
690
691 /* done with current mbuf, get the next one */
692 if (mbuf_avail == 0) {
693 m = m->next;
694
695 mbuf_offset = 0;
696 mbuf_avail = rte_pktmbuf_data_len(m);
697 }
698
699 if (hdr_addr) {
700 virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
701 if (rxvq_is_mergeable(dev))
702 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
703 num_buffers);
704
705 if (unlikely(hdr == &tmp_hdr)) {
706 uint64_t len;
707 uint64_t remain = dev->vhost_hlen;
708 uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
709 uint64_t iova = buf_vec[0].buf_iova;
710 uint16_t hdr_vec_idx = 0;
711
712 while (remain) {
713 len = RTE_MIN(remain,
714 buf_vec[hdr_vec_idx].buf_len);
715 dst = buf_vec[hdr_vec_idx].buf_addr;
716 rte_memcpy((void *)(uintptr_t)dst,
717 (void *)(uintptr_t)src,
718 len);
719
720 PRINT_PACKET(dev, (uintptr_t)dst,
721 (uint32_t)len, 0);
722 vhost_log_cache_write(dev, vq,
723 iova, len);
724
725 remain -= len;
726 iova += len;
727 src += len;
728 hdr_vec_idx++;
729 }
730 } else {
731 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
732 dev->vhost_hlen, 0);
733 vhost_log_cache_write(dev, vq,
734 buf_vec[0].buf_iova,
735 dev->vhost_hlen);
736 }
737
738 hdr_addr = 0;
739 }
740
741 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
742
743 if (likely(cpy_len > MAX_BATCH_LEN ||
744 vq->batch_copy_nb_elems >= vq->size)) {
745 rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
746 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
747 cpy_len);
748 vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
749 cpy_len);
750 PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
751 cpy_len, 0);
752 } else {
753 batch_copy[vq->batch_copy_nb_elems].dst =
754 (void *)((uintptr_t)(buf_addr + buf_offset));
755 batch_copy[vq->batch_copy_nb_elems].src =
756 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
757 batch_copy[vq->batch_copy_nb_elems].log_addr =
758 buf_iova + buf_offset;
759 batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
760 vq->batch_copy_nb_elems++;
761 }
762
763 mbuf_avail -= cpy_len;
764 mbuf_offset += cpy_len;
765 buf_avail -= cpy_len;
766 buf_offset += cpy_len;
767 }
768
769 out:
770
771 return error;
772 }
773
774 static __rte_always_inline uint32_t
775 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
776 struct rte_mbuf **pkts, uint32_t count)
777 {
778 uint32_t pkt_idx = 0;
779 uint16_t num_buffers;
780 struct buf_vector buf_vec[BUF_VECTOR_MAX];
781 uint16_t avail_head;
782
783 avail_head = *((volatile uint16_t *)&vq->avail->idx);
784
785 /*
786 * The ordering between avail index and
787 * desc reads needs to be enforced.
788 */
789 rte_smp_rmb();
790
791 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
792
793 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
794 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
795 uint16_t nr_vec = 0;
796
797 if (unlikely(reserve_avail_buf_split(dev, vq,
798 pkt_len, buf_vec, &num_buffers,
799 avail_head, &nr_vec) < 0)) {
800 VHOST_LOG_DEBUG(VHOST_DATA,
801 "(%d) failed to get enough desc from vring\n",
802 dev->vid);
803 vq->shadow_used_idx -= num_buffers;
804 break;
805 }
806
807 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
808
809 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
810 dev->vid, vq->last_avail_idx,
811 vq->last_avail_idx + num_buffers);
812
813 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
814 buf_vec, nr_vec,
815 num_buffers) < 0) {
816 vq->shadow_used_idx -= num_buffers;
817 break;
818 }
819
820 vq->last_avail_idx += num_buffers;
821 }
822
823 do_data_copy_enqueue(dev, vq);
824
825 if (likely(vq->shadow_used_idx)) {
826 flush_shadow_used_ring_split(dev, vq);
827 vhost_vring_call_split(dev, vq);
828 }
829
830 return pkt_idx;
831 }
832
833 static __rte_always_inline uint32_t
834 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
835 struct rte_mbuf **pkts, uint32_t count)
836 {
837 uint32_t pkt_idx = 0;
838 uint16_t num_buffers;
839 struct buf_vector buf_vec[BUF_VECTOR_MAX];
840
841 for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
842 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
843 uint16_t nr_vec = 0;
844 uint16_t nr_descs = 0;
845
846 if (unlikely(reserve_avail_buf_packed(dev, vq,
847 pkt_len, buf_vec, &nr_vec,
848 &num_buffers, &nr_descs) < 0)) {
849 VHOST_LOG_DEBUG(VHOST_DATA,
850 "(%d) failed to get enough desc from vring\n",
851 dev->vid);
852 vq->shadow_used_idx -= num_buffers;
853 break;
854 }
855
856 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
857
858 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
859 dev->vid, vq->last_avail_idx,
860 vq->last_avail_idx + num_buffers);
861
862 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
863 buf_vec, nr_vec,
864 num_buffers) < 0) {
865 vq->shadow_used_idx -= num_buffers;
866 break;
867 }
868
869 vq->last_avail_idx += nr_descs;
870 if (vq->last_avail_idx >= vq->size) {
871 vq->last_avail_idx -= vq->size;
872 vq->avail_wrap_counter ^= 1;
873 }
874 }
875
876 do_data_copy_enqueue(dev, vq);
877
878 if (likely(vq->shadow_used_idx)) {
879 flush_shadow_used_ring_packed(dev, vq);
880 vhost_vring_call_packed(dev, vq);
881 }
882
883 return pkt_idx;
884 }
885
886 static __rte_always_inline uint32_t
887 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
888 struct rte_mbuf **pkts, uint32_t count)
889 {
890 struct vhost_virtqueue *vq;
891 uint32_t nb_tx = 0;
892
893 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
894 if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
895 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
896 dev->vid, __func__, queue_id);
897 return 0;
898 }
899
900 vq = dev->virtqueue[queue_id];
901
902 rte_spinlock_lock(&vq->access_lock);
903
904 if (unlikely(vq->enabled == 0))
905 goto out_access_unlock;
906
907 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
908 vhost_user_iotlb_rd_lock(vq);
909
910 if (unlikely(vq->access_ok == 0))
911 if (unlikely(vring_translate(dev, vq) < 0))
912 goto out;
913
914 count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
915 if (count == 0)
916 goto out;
917
918 if (vq_is_packed(dev))
919 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
920 else
921 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
922
923 out:
924 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
925 vhost_user_iotlb_rd_unlock(vq);
926
927 out_access_unlock:
928 rte_spinlock_unlock(&vq->access_lock);
929
930 return nb_tx;
931 }
932
933 uint16_t
934 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
935 struct rte_mbuf **pkts, uint16_t count)
936 {
937 struct virtio_net *dev = get_device(vid);
938
939 if (!dev)
940 return 0;
941
942 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
943 RTE_LOG(ERR, VHOST_DATA,
944 "(%d) %s: built-in vhost net backend is disabled.\n",
945 dev->vid, __func__);
946 return 0;
947 }
948
949 return virtio_dev_rx(dev, queue_id, pkts, count);
950 }
951
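/*
 * Illustrative caller-side sketch (not part of the original file, excluded
 * from the build): how an application might feed a burst of mbufs to a
 * guest RX queue.  The "vid" and mbuf array are assumed to come from the
 * application; queue index 0 is used since enqueue targets even (guest RX)
 * virtqueues, and the mbufs remain owned (and freed) by the caller because
 * the data is copied into the guest buffers.
 */
#if 0
static void
enqueue_burst_sketch(int vid, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent, i;

	sent = rte_vhost_enqueue_burst(vid, 0, pkts, nb_pkts);

	/* The library copied the data; release all mbufs, sent or not. */
	for (i = 0; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);

	RTE_SET_USED(sent);
}
#endif
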
952 static inline bool
953 virtio_net_with_host_offload(struct virtio_net *dev)
954 {
955 if (dev->features &
956 ((1ULL << VIRTIO_NET_F_CSUM) |
957 (1ULL << VIRTIO_NET_F_HOST_ECN) |
958 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
959 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
960 (1ULL << VIRTIO_NET_F_HOST_UFO)))
961 return true;
962
963 return false;
964 }
965
966 static void
967 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
968 {
969 struct ipv4_hdr *ipv4_hdr;
970 struct ipv6_hdr *ipv6_hdr;
971 void *l3_hdr = NULL;
972 struct ether_hdr *eth_hdr;
973 uint16_t ethertype;
974
975 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
976
977 m->l2_len = sizeof(struct ether_hdr);
978 ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
979
980 if (ethertype == ETHER_TYPE_VLAN) {
981 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
982
983 m->l2_len += sizeof(struct vlan_hdr);
984 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
985 }
986
987 l3_hdr = (char *)eth_hdr + m->l2_len;
988
989 switch (ethertype) {
990 case ETHER_TYPE_IPv4:
991 ipv4_hdr = l3_hdr;
992 *l4_proto = ipv4_hdr->next_proto_id;
993 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
994 *l4_hdr = (char *)l3_hdr + m->l3_len;
995 m->ol_flags |= PKT_TX_IPV4;
996 break;
997 case ETHER_TYPE_IPv6:
998 ipv6_hdr = l3_hdr;
999 *l4_proto = ipv6_hdr->proto;
1000 m->l3_len = sizeof(struct ipv6_hdr);
1001 *l4_hdr = (char *)l3_hdr + m->l3_len;
1002 m->ol_flags |= PKT_TX_IPV6;
1003 break;
1004 default:
1005 m->l3_len = 0;
1006 *l4_proto = 0;
1007 *l4_hdr = NULL;
1008 break;
1009 }
1010 }
1011
1012 static __rte_always_inline void
1013 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1014 {
1015 uint16_t l4_proto = 0;
1016 void *l4_hdr = NULL;
1017 struct tcp_hdr *tcp_hdr = NULL;
1018
1019 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1020 return;
1021
1022 parse_ethernet(m, &l4_proto, &l4_hdr);
1023 if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1024 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1025 switch (hdr->csum_offset) {
1026 case (offsetof(struct tcp_hdr, cksum)):
1027 if (l4_proto == IPPROTO_TCP)
1028 m->ol_flags |= PKT_TX_TCP_CKSUM;
1029 break;
1030 case (offsetof(struct udp_hdr, dgram_cksum)):
1031 if (l4_proto == IPPROTO_UDP)
1032 m->ol_flags |= PKT_TX_UDP_CKSUM;
1033 break;
1034 case (offsetof(struct sctp_hdr, cksum)):
1035 if (l4_proto == IPPROTO_SCTP)
1036 m->ol_flags |= PKT_TX_SCTP_CKSUM;
1037 break;
1038 default:
1039 break;
1040 }
1041 }
1042 }
1043
1044 if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1045 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1046 case VIRTIO_NET_HDR_GSO_TCPV4:
1047 case VIRTIO_NET_HDR_GSO_TCPV6:
1048 tcp_hdr = l4_hdr;
1049 m->ol_flags |= PKT_TX_TCP_SEG;
1050 m->tso_segsz = hdr->gso_size;
1051 m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1052 break;
1053 case VIRTIO_NET_HDR_GSO_UDP:
1054 m->ol_flags |= PKT_TX_UDP_SEG;
1055 m->tso_segsz = hdr->gso_size;
1056 m->l4_len = sizeof(struct udp_hdr);
1057 break;
1058 default:
1059 RTE_LOG(WARNING, VHOST_DATA,
1060 "unsupported gso type %u.\n", hdr->gso_type);
1061 break;
1062 }
1063 }
1064 }
1065
1066 static __rte_always_inline int
1067 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1068 struct buf_vector *buf_vec, uint16_t nr_vec,
1069 struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1070 {
1071 uint32_t buf_avail, buf_offset;
1072 uint64_t buf_addr, buf_iova, buf_len;
1073 uint32_t mbuf_avail, mbuf_offset;
1074 uint32_t cpy_len;
1075 struct rte_mbuf *cur = m, *prev = m;
1076 struct virtio_net_hdr tmp_hdr;
1077 struct virtio_net_hdr *hdr = NULL;
1078 /* A counter to avoid an endless loop in the desc chain */
1079 uint16_t vec_idx = 0;
1080 struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1081 int error = 0;
1082
1083 buf_addr = buf_vec[vec_idx].buf_addr;
1084 buf_iova = buf_vec[vec_idx].buf_iova;
1085 buf_len = buf_vec[vec_idx].buf_len;
1086
1087 if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1088 error = -1;
1089 goto out;
1090 }
1091
1092 if (likely(nr_vec > 1))
1093 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
1094
1095 if (virtio_net_with_host_offload(dev)) {
1096 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1097 uint64_t len;
1098 uint64_t remain = sizeof(struct virtio_net_hdr);
1099 uint64_t src;
1100 uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
1101 uint16_t hdr_vec_idx = 0;
1102
1103 /*
1104 * No luck, the virtio-net header doesn't fit
1105 * in a contiguous virtual area.
1106 */
1107 while (remain) {
1108 len = RTE_MIN(remain,
1109 buf_vec[hdr_vec_idx].buf_len);
1110 src = buf_vec[hdr_vec_idx].buf_addr;
1111 rte_memcpy((void *)(uintptr_t)dst,
1112 (void *)(uintptr_t)src, len);
1113
1114 remain -= len;
1115 dst += len;
1116 hdr_vec_idx++;
1117 }
1118
1119 hdr = &tmp_hdr;
1120 } else {
1121 hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1122 rte_prefetch0(hdr);
1123 }
1124 }
1125
1126 /*
1127 * A virtio driver normally uses at least 2 desc buffers
1128 * for Tx: the first for storing the header, and others
1129 * for storing the data.
1130 */
1131 if (unlikely(buf_len < dev->vhost_hlen)) {
1132 buf_offset = dev->vhost_hlen - buf_len;
1133 vec_idx++;
1134 buf_addr = buf_vec[vec_idx].buf_addr;
1135 buf_iova = buf_vec[vec_idx].buf_iova;
1136 buf_len = buf_vec[vec_idx].buf_len;
1137 buf_avail = buf_len - buf_offset;
1138 } else if (buf_len == dev->vhost_hlen) {
1139 if (unlikely(++vec_idx >= nr_vec))
1140 goto out;
1141 buf_addr = buf_vec[vec_idx].buf_addr;
1142 buf_iova = buf_vec[vec_idx].buf_iova;
1143 buf_len = buf_vec[vec_idx].buf_len;
1144
1145 buf_offset = 0;
1146 buf_avail = buf_len;
1147 } else {
1148 buf_offset = dev->vhost_hlen;
1149 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1150 }
1151
1152 rte_prefetch0((void *)(uintptr_t)
1153 (buf_addr + buf_offset));
1154
1155 PRINT_PACKET(dev,
1156 (uintptr_t)(buf_addr + buf_offset),
1157 (uint32_t)buf_avail, 0);
1158
1159 mbuf_offset = 0;
1160 mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
1161 while (1) {
1162 uint64_t hpa;
1163
1164 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1165
1166 /*
1167 * A desc buf might span two host physical pages that are
1168 * not contiguous. In that case (gpa_to_hpa returns 0), data
1169 * will be copied even though zero copy is enabled.
1170 */
1171 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1172 buf_iova + buf_offset, cpy_len)))) {
1173 cur->data_len = cpy_len;
1174 cur->data_off = 0;
1175 cur->buf_addr =
1176 (void *)(uintptr_t)(buf_addr + buf_offset);
1177 cur->buf_iova = hpa;
1178
1179 /*
1180 * In zero copy mode, one mbuf can only reference data
1181 * from a single desc buf, or part of one.
1182 */
1183 mbuf_avail = cpy_len;
1184 } else {
1185 if (likely(cpy_len > MAX_BATCH_LEN ||
1186 vq->batch_copy_nb_elems >= vq->size ||
1187 (hdr && cur == m))) {
1188 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1189 mbuf_offset),
1190 (void *)((uintptr_t)(buf_addr +
1191 buf_offset)),
1192 cpy_len);
1193 } else {
1194 batch_copy[vq->batch_copy_nb_elems].dst =
1195 rte_pktmbuf_mtod_offset(cur, void *,
1196 mbuf_offset);
1197 batch_copy[vq->batch_copy_nb_elems].src =
1198 (void *)((uintptr_t)(buf_addr +
1199 buf_offset));
1200 batch_copy[vq->batch_copy_nb_elems].len =
1201 cpy_len;
1202 vq->batch_copy_nb_elems++;
1203 }
1204 }
1205
1206 mbuf_avail -= cpy_len;
1207 mbuf_offset += cpy_len;
1208 buf_avail -= cpy_len;
1209 buf_offset += cpy_len;
1210
1211 /* This buf has reached its end, get the next one */
1212 if (buf_avail == 0) {
1213 if (++vec_idx >= nr_vec)
1214 break;
1215
1216 buf_addr = buf_vec[vec_idx].buf_addr;
1217 buf_iova = buf_vec[vec_idx].buf_iova;
1218 buf_len = buf_vec[vec_idx].buf_len;
1219
1220 /*
1221 * Prefetch the desc n + 1 buffer while
1222 * the desc n buffer is processed.
1223 */
1224 if (vec_idx + 1 < nr_vec)
1225 rte_prefetch0((void *)(uintptr_t)
1226 buf_vec[vec_idx + 1].buf_addr);
1227
1228 buf_offset = 0;
1229 buf_avail = buf_len;
1230
1231 PRINT_PACKET(dev, (uintptr_t)buf_addr,
1232 (uint32_t)buf_avail, 0);
1233 }
1234
1235 /*
1236 * This mbuf has reached its end, get a new one
1237 * to hold more data.
1238 */
1239 if (mbuf_avail == 0) {
1240 cur = rte_pktmbuf_alloc(mbuf_pool);
1241 if (unlikely(cur == NULL)) {
1242 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1243 "allocate memory for mbuf.\n");
1244 error = -1;
1245 goto out;
1246 }
1247 if (unlikely(dev->dequeue_zero_copy))
1248 rte_mbuf_refcnt_update(cur, 1);
1249
1250 prev->next = cur;
1251 prev->data_len = mbuf_offset;
1252 m->nb_segs += 1;
1253 m->pkt_len += mbuf_offset;
1254 prev = cur;
1255
1256 mbuf_offset = 0;
1257 mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1258 }
1259 }
1260
1261 prev->data_len = mbuf_offset;
1262 m->pkt_len += mbuf_offset;
1263
1264 if (hdr)
1265 vhost_dequeue_offload(hdr, m);
1266
1267 out:
1268
1269 return error;
1270 }
1271
1272 static __rte_always_inline struct zcopy_mbuf *
1273 get_zmbuf(struct vhost_virtqueue *vq)
1274 {
1275 uint16_t i;
1276 uint16_t last;
1277 int tries = 0;
1278
1279 /* search [last_zmbuf_idx, zmbuf_size) */
1280 i = vq->last_zmbuf_idx;
1281 last = vq->zmbuf_size;
1282
1283 again:
1284 for (; i < last; i++) {
1285 if (vq->zmbufs[i].in_use == 0) {
1286 vq->last_zmbuf_idx = i + 1;
1287 vq->zmbufs[i].in_use = 1;
1288 return &vq->zmbufs[i];
1289 }
1290 }
1291
1292 tries++;
1293 if (tries == 1) {
1294 /* search [0, last_zmbuf_idx) */
1295 i = 0;
1296 last = vq->last_zmbuf_idx;
1297 goto again;
1298 }
1299
1300 return NULL;
1301 }
1302
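/*
 * Illustrative worked example (not part of the original file), assuming
 * zmbuf_size == 8 and last_zmbuf_idx == 5: get_zmbuf() first scans slots
 * [5, 8) and, if they are all in use, wraps around once to scan [0, 5)
 * before giving up, so the search starts just past the most recently
 * allocated slot and covers the whole array at most once.
 */
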
1303 static __rte_always_inline uint16_t
1304 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1305 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1306 {
1307 uint16_t i;
1308 uint16_t free_entries;
1309
1310 if (unlikely(dev->dequeue_zero_copy)) {
1311 struct zcopy_mbuf *zmbuf, *next;
1312
1313 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1314 zmbuf != NULL; zmbuf = next) {
1315 next = TAILQ_NEXT(zmbuf, next);
1316
1317 if (mbuf_is_consumed(zmbuf->mbuf)) {
1318 update_shadow_used_ring_split(vq,
1319 zmbuf->desc_idx, 0);
1320 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1321 restore_mbuf(zmbuf->mbuf);
1322 rte_pktmbuf_free(zmbuf->mbuf);
1323 put_zmbuf(zmbuf);
1324 vq->nr_zmbuf -= 1;
1325 }
1326 }
1327
1328 if (likely(vq->shadow_used_idx)) {
1329 flush_shadow_used_ring_split(dev, vq);
1330 vhost_vring_call_split(dev, vq);
1331 }
1332 }
1333
1334 free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1335 vq->last_avail_idx;
1336 if (free_entries == 0)
1337 return 0;
1338
1339 /*
1340 * The ordering between avail index and
1341 * desc reads needs to be enforced.
1342 */
1343 rte_smp_rmb();
1344
1345 rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1346
1347 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1348
1349 count = RTE_MIN(count, MAX_PKT_BURST);
1350 count = RTE_MIN(count, free_entries);
1351 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1352 dev->vid, count);
1353
1354 for (i = 0; i < count; i++) {
1355 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1356 uint16_t head_idx;
1357 uint32_t dummy_len;
1358 uint16_t nr_vec = 0;
1359 int err;
1360
1361 if (unlikely(fill_vec_buf_split(dev, vq,
1362 vq->last_avail_idx + i,
1363 &nr_vec, buf_vec,
1364 &head_idx, &dummy_len,
1365 VHOST_ACCESS_RO) < 0))
1366 break;
1367
1368 if (likely(dev->dequeue_zero_copy == 0))
1369 update_shadow_used_ring_split(vq, head_idx, 0);
1370
1371 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1372
1373 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1374 if (unlikely(pkts[i] == NULL)) {
1375 RTE_LOG(ERR, VHOST_DATA,
1376 "Failed to allocate memory for mbuf.\n");
1377 break;
1378 }
1379
1380 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1381 mbuf_pool);
1382 if (unlikely(err)) {
1383 rte_pktmbuf_free(pkts[i]);
1384 break;
1385 }
1386
1387 if (unlikely(dev->dequeue_zero_copy)) {
1388 struct zcopy_mbuf *zmbuf;
1389
1390 zmbuf = get_zmbuf(vq);
1391 if (!zmbuf) {
1392 rte_pktmbuf_free(pkts[i]);
1393 break;
1394 }
1395 zmbuf->mbuf = pkts[i];
1396 zmbuf->desc_idx = head_idx;
1397
1398 /*
1399 * Pin the mbuf by bumping its refcnt; we will check later
1400 * whether the mbuf has been freed by the application (i.e.
1401 * we are the last user). If that's the case, we can then
1402 * update the used ring safely.
1403 */
1404 rte_mbuf_refcnt_update(pkts[i], 1);
1405
1406 vq->nr_zmbuf += 1;
1407 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1408 }
1409 }
1410 vq->last_avail_idx += i;
1411
1412 if (likely(dev->dequeue_zero_copy == 0)) {
1413 do_data_copy_dequeue(vq);
1414 if (unlikely(i < count))
1415 vq->shadow_used_idx = i;
1416 if (likely(vq->shadow_used_idx)) {
1417 flush_shadow_used_ring_split(dev, vq);
1418 vhost_vring_call_split(dev, vq);
1419 }
1420 }
1421
1422 return i;
1423 }
1424
1425 static __rte_always_inline uint16_t
1426 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1427 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1428 {
1429 uint16_t i;
1430
1431 if (unlikely(dev->dequeue_zero_copy)) {
1432 struct zcopy_mbuf *zmbuf, *next;
1433
1434 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1435 zmbuf != NULL; zmbuf = next) {
1436 next = TAILQ_NEXT(zmbuf, next);
1437
1438 if (mbuf_is_consumed(zmbuf->mbuf)) {
1439 update_shadow_used_ring_packed(vq,
1440 zmbuf->desc_idx,
1441 0,
1442 zmbuf->desc_count);
1443
1444 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1445 restore_mbuf(zmbuf->mbuf);
1446 rte_pktmbuf_free(zmbuf->mbuf);
1447 put_zmbuf(zmbuf);
1448 vq->nr_zmbuf -= 1;
1449 }
1450 }
1451
1452 if (likely(vq->shadow_used_idx)) {
1453 flush_shadow_used_ring_packed(dev, vq);
1454 vhost_vring_call_packed(dev, vq);
1455 }
1456 }
1457
1458 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1459
1460 count = RTE_MIN(count, MAX_PKT_BURST);
1461 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1462 dev->vid, count);
1463
1464 for (i = 0; i < count; i++) {
1465 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1466 uint16_t buf_id;
1467 uint32_t dummy_len;
1468 uint16_t desc_count, nr_vec = 0;
1469 int err;
1470
1471 if (unlikely(fill_vec_buf_packed(dev, vq,
1472 vq->last_avail_idx, &desc_count,
1473 buf_vec, &nr_vec,
1474 &buf_id, &dummy_len,
1475 VHOST_ACCESS_RO) < 0))
1476 break;
1477
1478 if (likely(dev->dequeue_zero_copy == 0))
1479 update_shadow_used_ring_packed(vq, buf_id, 0,
1480 desc_count);
1481
1482 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1483
1484 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1485 if (unlikely(pkts[i] == NULL)) {
1486 RTE_LOG(ERR, VHOST_DATA,
1487 "Failed to allocate memory for mbuf.\n");
1488 break;
1489 }
1490
1491 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1492 mbuf_pool);
1493 if (unlikely(err)) {
1494 rte_pktmbuf_free(pkts[i]);
1495 break;
1496 }
1497
1498 if (unlikely(dev->dequeue_zero_copy)) {
1499 struct zcopy_mbuf *zmbuf;
1500
1501 zmbuf = get_zmbuf(vq);
1502 if (!zmbuf) {
1503 rte_pktmbuf_free(pkts[i]);
1504 break;
1505 }
1506 zmbuf->mbuf = pkts[i];
1507 zmbuf->desc_idx = buf_id;
1508 zmbuf->desc_count = desc_count;
1509
1510 /*
1511 * Pin the mbuf by bumping its refcnt; we will check later
1512 * whether the mbuf has been freed by the application (i.e.
1513 * we are the last user). If that's the case, we can then
1514 * update the used ring safely.
1515 */
1516 rte_mbuf_refcnt_update(pkts[i], 1);
1517
1518 vq->nr_zmbuf += 1;
1519 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1520 }
1521
1522 vq->last_avail_idx += desc_count;
1523 if (vq->last_avail_idx >= vq->size) {
1524 vq->last_avail_idx -= vq->size;
1525 vq->avail_wrap_counter ^= 1;
1526 }
1527 }
1528
1529 if (likely(dev->dequeue_zero_copy == 0)) {
1530 do_data_copy_dequeue(vq);
1531 if (unlikely(i < count))
1532 vq->shadow_used_idx = i;
1533 if (likely(vq->shadow_used_idx)) {
1534 flush_shadow_used_ring_packed(dev, vq);
1535 vhost_vring_call_packed(dev, vq);
1536 }
1537 }
1538
1539 return i;
1540 }
1541
1542 uint16_t
1543 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1544 struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1545 {
1546 struct virtio_net *dev;
1547 struct rte_mbuf *rarp_mbuf = NULL;
1548 struct vhost_virtqueue *vq;
1549
1550 dev = get_device(vid);
1551 if (!dev)
1552 return 0;
1553
1554 if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1555 RTE_LOG(ERR, VHOST_DATA,
1556 "(%d) %s: built-in vhost net backend is disabled.\n",
1557 dev->vid, __func__);
1558 return 0;
1559 }
1560
1561 if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
1562 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1563 dev->vid, __func__, queue_id);
1564 return 0;
1565 }
1566
1567 vq = dev->virtqueue[queue_id];
1568
1569 if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1570 return 0;
1571
1572 if (unlikely(vq->enabled == 0)) {
1573 count = 0;
1574 goto out_access_unlock;
1575 }
1576
1577 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1578 vhost_user_iotlb_rd_lock(vq);
1579
1580 if (unlikely(vq->access_ok == 0))
1581 if (unlikely(vring_translate(dev, vq) < 0)) {
1582 count = 0;
1583 goto out;
1584 }
1585
1586 /*
1587 * Construct a RARP broadcast packet, and inject it to the "pkts"
1588 * array, so it looks like the guest actually sent such a packet.
1589 *
1590 * Check user_send_rarp() for more information.
1591 *
1592 * broadcast_rarp shares a cacheline in the virtio_net structure
1593 * with some fields that are accessed during enqueue and
1594 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
1595 * result in false sharing between enqueue and dequeue.
1596 *
1597 * Prevent unnecessary false sharing by reading broadcast_rarp first
1598 * and only performing cmpset if the read indicates it is likely to
1599 * be set.
1600 */
1601 if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1602 rte_atomic16_cmpset((volatile uint16_t *)
1603 &dev->broadcast_rarp.cnt, 1, 0))) {
1604
1605 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
1606 if (rarp_mbuf == NULL) {
1607 RTE_LOG(ERR, VHOST_DATA,
1608 "Failed to make RARP packet.\n");
1609 count = 0;
1610 goto out;
1611 }
1612 count -= 1;
1613 }
1614
1615 if (vq_is_packed(dev))
1616 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
1617 else
1618 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
1619
1620 out:
1621 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1622 vhost_user_iotlb_rd_unlock(vq);
1623
1624 out_access_unlock:
1625 rte_spinlock_unlock(&vq->access_lock);
1626
1627 if (unlikely(rarp_mbuf != NULL)) {
1628 /*
1629 * Inject it at the head of the "pkts" array, so that the switch's
1630 * MAC learning table gets updated first.
1631 */
1632 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
1633 pkts[0] = rarp_mbuf;
1634 count += 1;
1635 }
1636
1637 return count;
1638 }
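/*
 * Illustrative caller-side sketch (not part of the original file, excluded
 * from the build): how an application might drain a guest TX queue.  The
 * "vid" and mempool are assumed to come from the application; queue index 1
 * is used since dequeue targets odd (guest TX) virtqueues, and the caller
 * becomes the owner of the returned mbufs and must eventually free them.
 */
#if 0
static void
dequeue_burst_sketch(int vid, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t nb_rx, i;

	nb_rx = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts,
			MAX_PKT_BURST);

	for (i = 0; i < nb_rx; i++) {
		/* Forward, inspect or otherwise consume the packet here. */
		rte_pktmbuf_free(pkts[i]);
	}
}
#endif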