/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
38 #include <linux/virtio_ring.h>
39 #include <linux/virtio_net.h>
41 #include <rte_atomic.h>
42 #include <rte_memory.h>
43 #include <rte_memzone.h>
44 #include <rte_mempool.h>
46 #include "virtio_logs.h"
/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096

/* Virtqueue roles: receive, transmit and control queue. */
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

/* Maximum length of the name stored in struct virtqueue::vq_name. */
#define VIRTQUEUE_MAX_NAME_SZ 32
65 struct pmd_internals
{
66 struct rte_eth_stats eth_stats
;
73 char vq_name
[VIRTQUEUE_MAX_NAME_SZ
];
74 struct rte_mempool
*mpool
; /**< mempool for mbuf allocation */
75 uint16_t queue_id
; /**< DPDK queue index. */
76 uint16_t vq_queue_index
; /**< PCI queue index */
77 uint8_t port_id
; /**< Device port identifier. */
79 void *vq_ring_virt_mem
; /**< virtual address of vring*/
83 struct vring vq_ring
; /**< vring keeping desc, used and avail */
84 struct pmd_internals
*internals
; /**< virtio device internal info. */
85 uint16_t vq_nentries
; /**< vring desc numbers */
86 uint16_t vq_desc_head_idx
;
87 uint16_t vq_free_cnt
; /**< num of desc available */
88 uint16_t vq_used_cons_idx
; /**< Last consumed desc in used table, trails vq_ring.used->idx*/
90 struct vq_desc_extra
{
93 } vq_descx
[0] __rte_cache_aligned
;
/*
 * Dump the live counters of a virtqueue at DEBUG level.  Compiles to a
 * no-op unless RTE_LIBRTE_XENVIRT_DEBUG_DUMP is defined, so it is free
 * in production builds.
 */
#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	PMD_INIT_LOG(DEBUG, \
	  "VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
	  " avail.flags=0x%x; used.flags=0x%x\n", \
	  (vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
	  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
	  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
	  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif
/**
 * Dump virtqueue internal structures, for debug purpose only.
 */
void virtqueue_dump(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 * NOTE(review): "detatch" is a historical typo kept intact — callers
 * elsewhere in the driver link against this exact symbol name.
 */
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
126 static inline int __attribute__((always_inline
))
127 virtqueue_full(const struct virtqueue
*vq
)
129 return vq
->vq_free_cnt
== 0;
132 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
134 static inline void __attribute__((always_inline
))
135 vq_ring_update_avail(struct virtqueue
*vq
, uint16_t desc_idx
)
139 * Place the head of the descriptor chain into the next slot and make
140 * it usable to the host. The chain is made available now rather than
141 * deferring to virtqueue_notify() in the hopes that if the host is
142 * currently running on another CPU, we can keep it processing the new
145 avail_idx
= (uint16_t)(vq
->vq_ring
.avail
->idx
& (vq
->vq_nentries
- 1));
146 vq
->vq_ring
.avail
->ring
[avail_idx
] = desc_idx
;
148 vq
->vq_ring
.avail
->idx
++;
151 static inline void __attribute__((always_inline
))
152 vq_ring_free_chain(struct virtqueue
*vq
, uint16_t desc_idx
)
154 struct vring_desc
*dp
;
155 struct vq_desc_extra
*dxp
;
157 dp
= &vq
->vq_ring
.desc
[desc_idx
];
158 dxp
= &vq
->vq_descx
[desc_idx
];
159 vq
->vq_free_cnt
= (uint16_t)(vq
->vq_free_cnt
+ dxp
->ndescs
);
160 while (dp
->flags
& VRING_DESC_F_NEXT
) {
161 dp
= &vq
->vq_ring
.desc
[dp
->next
];
166 * We must append the existing free chain, if any, to the end of
167 * newly freed chain. If the virtqueue was completely used, then
168 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
170 dp
->next
= vq
->vq_desc_head_idx
;
171 vq
->vq_desc_head_idx
= desc_idx
;
174 static inline int __attribute__((always_inline
))
175 virtqueue_enqueue_recv_refill(struct virtqueue
*rxvq
, struct rte_mbuf
*cookie
)
177 const uint16_t needed
= 1;
178 const uint16_t head_idx
= rxvq
->vq_desc_head_idx
;
179 struct vring_desc
*start_dp
= rxvq
->vq_ring
.desc
;
180 struct vq_desc_extra
*dxp
;
182 if (unlikely(rxvq
->vq_free_cnt
== 0))
184 if (unlikely(rxvq
->vq_free_cnt
< needed
))
186 if (unlikely(head_idx
>= rxvq
->vq_nentries
))
189 dxp
= &rxvq
->vq_descx
[head_idx
];
190 dxp
->cookie
= (void *)cookie
;
191 dxp
->ndescs
= needed
;
193 start_dp
[head_idx
].addr
=
194 (uint64_t) ((uintptr_t)cookie
->buf_addr
+ RTE_PKTMBUF_HEADROOM
- sizeof(struct virtio_net_hdr
));
195 start_dp
[head_idx
].len
= cookie
->buf_len
- RTE_PKTMBUF_HEADROOM
+ sizeof(struct virtio_net_hdr
);
196 start_dp
[head_idx
].flags
= VRING_DESC_F_WRITE
;
197 rxvq
->vq_desc_head_idx
= start_dp
[head_idx
].next
;
198 rxvq
->vq_free_cnt
= (uint16_t)(rxvq
->vq_free_cnt
- needed
);
199 vq_ring_update_avail(rxvq
, head_idx
);
204 static inline int __attribute__((always_inline
))
205 virtqueue_enqueue_xmit(struct virtqueue
*txvq
, struct rte_mbuf
*cookie
)
208 const uint16_t needed
= 2;
209 struct vring_desc
*start_dp
= txvq
->vq_ring
.desc
;
210 uint16_t head_idx
= txvq
->vq_desc_head_idx
;
211 uint16_t idx
= head_idx
;
212 struct vq_desc_extra
*dxp
;
214 if (unlikely(txvq
->vq_free_cnt
== 0))
216 if (unlikely(txvq
->vq_free_cnt
< needed
))
218 if (unlikely(head_idx
>= txvq
->vq_nentries
))
221 dxp
= &txvq
->vq_descx
[idx
];
222 dxp
->cookie
= (void *)cookie
;
223 dxp
->ndescs
= needed
;
225 start_dp
= txvq
->vq_ring
.desc
;
226 start_dp
[idx
].addr
= 0;
228 * TODO: save one desc here?
230 start_dp
[idx
].len
= sizeof(struct virtio_net_hdr
);
231 start_dp
[idx
].flags
= VRING_DESC_F_NEXT
;
232 start_dp
[idx
].addr
= (uintptr_t)NULL
;
233 idx
= start_dp
[idx
].next
;
234 start_dp
[idx
].addr
= (uint64_t)rte_pktmbuf_mtod(cookie
, uintptr_t);
235 start_dp
[idx
].len
= cookie
->data_len
;
236 start_dp
[idx
].flags
= 0;
237 idx
= start_dp
[idx
].next
;
238 txvq
->vq_desc_head_idx
= idx
;
239 txvq
->vq_free_cnt
= (uint16_t)(txvq
->vq_free_cnt
- needed
);
240 vq_ring_update_avail(txvq
, head_idx
);
245 static inline uint16_t __attribute__((always_inline
))
246 virtqueue_dequeue_burst(struct virtqueue
*vq
, struct rte_mbuf
**rx_pkts
, uint32_t *len
, uint16_t num
)
248 struct vring_used_elem
*uep
;
249 struct rte_mbuf
*cookie
;
250 uint16_t used_idx
, desc_idx
;
252 /* Caller does the check */
253 for (i
= 0; i
< num
; i
++) {
254 used_idx
= (uint16_t)(vq
->vq_used_cons_idx
& (vq
->vq_nentries
- 1));
255 uep
= &vq
->vq_ring
.used
->ring
[used_idx
];
256 desc_idx
= (uint16_t) uep
->id
;
257 cookie
= (struct rte_mbuf
*)vq
->vq_descx
[desc_idx
].cookie
;
258 if (unlikely(cookie
== NULL
)) {
259 PMD_DRV_LOG(ERR
, "vring descriptor with no mbuf cookie at %u\n",
260 vq
->vq_used_cons_idx
);
261 RTE_LOG(ERR
, PMD
, "%s: inconsistent (%u, %u)\n", __func__
, used_idx
, desc_idx
);
266 vq
->vq_used_cons_idx
++;
267 vq_ring_free_chain(vq
, desc_idx
);
268 vq
->vq_descx
[desc_idx
].cookie
= NULL
;
273 #endif /* _VIRTQUEUE_H_ */