/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>

#include <tmmintrin.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "virtio_rxtx_simple.h"

#define RTE_VIRTIO_DESC_PER_LOOP 8

/* virtio vPMD receive routine, only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP
 *
 * This routine is for non-mergeable RX, one desc for each guest buffer.
 * This routine is based on the RX ring layout optimization. Each entry in the
 * avail ring points to the desc with the same index in the desc ring and this
 * will never be changed in the driver.
 *
 * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received = 0;
	__m128i shuf_msk1, shuf_msk2, len_adjust;
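
	/* The two shuffle masks below pick the 16-bit length out of each
	 * vring_used_elem (one mask per used element within a 128-bit load)
	 * and place it at the pkt_len and data_len offsets expected in
	 * mbuf->rx_descriptor_fields1; all other bytes are zeroed by the
	 * 0xFF selectors.
	 */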
	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		5, 4,			/* dat len */
		0xFF, 0xFF, 5, 4,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		13, 12,			/* dat len */
		0xFF, 0xFF, 13, 12,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	/* Subtract the header length.
	 * In which case do we need the header length in used->len ?
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);

	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_split.ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch0(rused);

	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}
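
	/* Main loop: each pass handles RTE_VIRTIO_DESC_PER_LOOP used
	 * descriptors, copying the corresponding mbuf pointers from the
	 * queue's sw_ring into rx_pkts and rewriting each mbuf's
	 * rx_descriptor_fields1 from its used-ring entry.
	 */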
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
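
		/* Each 128-bit load below fetches two mbuf pointers from
		 * sw_ring and two used-ring elements; the mbuf pointers are
		 * stored straight into the caller's rx_pkts array.
		 */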
		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
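
		/* Shuffle the packet length of each used element into the
		 * pkt_len/data_len slots of rx_descriptor_fields1, subtract
		 * the virtio net header size via len_adjust, then write the
		 * result back into each mbuf.
		 */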
		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);
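
		/* Advance or stop: when fewer than a full batch remains, or
		 * when the next batch would run past the end of sw_ring
		 * (ring wrap), account only the entries actually valid and
		 * leave the loop; otherwise step all cursors forward by one
		 * batch.
		 */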
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}
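
	/* Publish what was consumed: advance the used-ring consumer index
	 * and free count, and account the packets in the queue stats.
	 */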
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}