/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>

#include <rte_mbuf.h>

#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtio_rxtx_simple.h"

/*
 * Two types of mbuf to be cleaned:
 * 1) mbuf that has been consumed by the backend but not yet used by virtio.
 * 2) mbuf that hasn't been consumed by the backend.
 */
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
{
	struct rte_mbuf *cookie;
	struct virtio_hw *hw;
	uint16_t start, end;
	int type, idx;

	if (vq == NULL)
		return NULL;

	hw = vq->hw;
	type = virtio_get_queue_type(hw, vq->vq_queue_index);
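	/*
	 * In the vectorized Rx path, [start, end) below spans the
	 * vq_free_cnt free descriptors beginning at vq_avail_idx (modulo
	 * the ring size); their sw_ring slots are skipped by the loop.
	 */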
	start = vq->vq_avail_idx & (vq->vq_nentries - 1);
	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);

	for (idx = 0; idx < vq->vq_nentries; idx++) {
		if (hw->use_simple_rx && type == VTNET_RQ) {
			if (start <= end && idx >= start && idx < end)
				continue;
			if (start > end && (idx >= start || idx < end))
				continue;
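			/* Outside the free window the sw_ring slot may still hold an mbuf. */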
			cookie = vq->sw_ring[idx];
			if (cookie != NULL) {
				vq->sw_ring[idx] = NULL;
				return cookie;
			}
		} else {
			cookie = vq->vq_descx[idx].cookie;
			if (cookie != NULL) {
				vq->vq_descx[idx].cookie = NULL;
				return cookie;
			}
		}
	}

	return NULL;
}

/* Flush the used descriptors of a packed Rx virtqueue. */
static void
virtqueue_rxvq_flush_packed(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t i;

	struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
	int cnt = 0;

	i = vq->vq_used_cons_idx;
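	/*
	 * Walk the descriptors the device has marked used, free the
	 * attached mbufs and return the slots to the free pool; cnt
	 * bounds the walk to one pass over the ring.
	 */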
	while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
		dxp = &vq->vq_descx[descs[i].id];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
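		/* Past the end of the ring: wrap and flip the used wrap counter. */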
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		i = vq->vq_used_cons_idx;
	}
}

/* Flush the elements in the used ring of a split Rx virtqueue. */
static void
virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
	struct virtnet_rx *rxq = &vq->rxq;
	struct virtio_hw *hw = vq->hw;
	struct vring_used_elem *uep;
	struct vq_desc_extra *dxp;
	uint16_t used_idx, desc_idx;
	uint16_t nb_used, i;

	nb_used = VIRTQUEUE_NUSED(vq);

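	/*
	 * Drain every used entry: free the mbuf attached to it and return
	 * the descriptor through the reclaim path that matches the active
	 * Rx mode (vectorized, in-order or regular split ring).
	 */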
	for (i = 0; i < nb_used; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_split.ring.used->ring[used_idx];
		if (hw->use_simple_rx) {
			desc_idx = used_idx;
			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
			vq->vq_free_cnt++;
		} else if (hw->use_inorder_rx) {
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_inorder(vq, desc_idx, 1);
		} else {
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_chain(vq, desc_idx);
		}
		vq->vq_used_cons_idx++;
	}

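	/*
	 * For the vectorized Rx path, refill the ring in
	 * RTE_VIRTIO_VPMD_RX_REARM_THRESH sized batches and notify the
	 * device whenever a kick is required.
	 */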
	if (hw->use_simple_rx) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxq);
			if (virtqueue_kick_prepare(vq))
				virtqueue_notify(vq);
		}
	}
}

/* Flush the elements in the used ring. */
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
	struct virtio_hw *hw = vq->hw;

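	/* Dispatch on the negotiated ring layout: packed or split. */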
	if (vtpci_packed_queue(hw))
		virtqueue_rxvq_flush_packed(vq);
	else
		virtqueue_rxvq_flush_split(vq);
}