/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #ifndef _VIRTIO_RXTX_SIMPLE_H_
35 #define _VIRTIO_RXTX_SIMPLE_H_
39 #include "virtio_logs.h"
40 #include "virtio_ethdev.h"
41 #include "virtqueue.h"
42 #include "virtio_rxtx.h"
44 #define RTE_VIRTIO_VPMD_RX_BURST 32
45 #define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
48 virtio_rxq_rearm_vec(struct virtnet_rx
*rxvq
)
52 struct rte_mbuf
**sw_ring
;
53 struct vring_desc
*start_dp
;
55 struct virtqueue
*vq
= rxvq
->vq
;
57 desc_idx
= vq
->vq_avail_idx
& (vq
->vq_nentries
- 1);
58 sw_ring
= &vq
->sw_ring
[desc_idx
];
59 start_dp
= &vq
->vq_ring
.desc
[desc_idx
];
61 ret
= rte_mempool_get_bulk(rxvq
->mpool
, (void **)sw_ring
,
62 RTE_VIRTIO_VPMD_RX_REARM_THRESH
);
64 rte_eth_devices
[rxvq
->port_id
].data
->rx_mbuf_alloc_failed
+=
65 RTE_VIRTIO_VPMD_RX_REARM_THRESH
;
69 for (i
= 0; i
< RTE_VIRTIO_VPMD_RX_REARM_THRESH
; i
++) {
72 p
= (uintptr_t)&sw_ring
[i
]->rearm_data
;
73 *(uint64_t *)p
= rxvq
->mbuf_initializer
;
76 VIRTIO_MBUF_ADDR(sw_ring
[i
], vq
) +
77 RTE_PKTMBUF_HEADROOM
- vq
->hw
->vtnet_hdr_size
;
78 start_dp
[i
].len
= sw_ring
[i
]->buf_len
-
79 RTE_PKTMBUF_HEADROOM
+ vq
->hw
->vtnet_hdr_size
;
82 vq
->vq_avail_idx
+= RTE_VIRTIO_VPMD_RX_REARM_THRESH
;
83 vq
->vq_free_cnt
-= RTE_VIRTIO_VPMD_RX_REARM_THRESH
;
84 vq_update_avail_idx(vq
);
/* TX cleanup batching parameters: reclaim VIRTIO_TX_FREE_NR completed
 * descriptors at a time, returning up to VIRTIO_TX_MAX_FREE_BUF_SZ mbufs
 * to their mempool in a single bulk put. */
#define VIRTIO_TX_FREE_THRESH 32
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
92 virtio_xmit_cleanup(struct virtqueue
*vq
)
96 struct rte_mbuf
*m
, *free
[VIRTIO_TX_MAX_FREE_BUF_SZ
];
98 desc_idx
= (uint16_t)(vq
->vq_used_cons_idx
&
99 ((vq
->vq_nentries
>> 1) - 1));
100 m
= (struct rte_mbuf
*)vq
->vq_descx
[desc_idx
++].cookie
;
101 m
= __rte_pktmbuf_prefree_seg(m
);
102 if (likely(m
!= NULL
)) {
105 for (i
= 1; i
< VIRTIO_TX_FREE_NR
; i
++) {
106 m
= (struct rte_mbuf
*)vq
->vq_descx
[desc_idx
++].cookie
;
107 m
= __rte_pktmbuf_prefree_seg(m
);
108 if (likely(m
!= NULL
)) {
109 if (likely(m
->pool
== free
[0]->pool
))
112 rte_mempool_put_bulk(free
[0]->pool
,
114 RTE_MIN(RTE_DIM(free
),
121 rte_mempool_put_bulk(free
[0]->pool
, (void **)free
,
122 RTE_MIN(RTE_DIM(free
), nb_free
));
124 for (i
= 1; i
< VIRTIO_TX_FREE_NR
; i
++) {
125 m
= (struct rte_mbuf
*)vq
->vq_descx
[desc_idx
++].cookie
;
126 m
= __rte_pktmbuf_prefree_seg(m
);
128 rte_mempool_put(m
->pool
, m
);
132 vq
->vq_used_cons_idx
+= VIRTIO_TX_FREE_NR
;
133 vq
->vq_free_cnt
+= (VIRTIO_TX_FREE_NR
<< 1);
136 #endif /* _VIRTIO_RXTX_SIMPLE_H_ */