/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

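/*
 * Compare two vDPA device addresses. Only PCI addresses are compared
 * field by field; for any other address type, matching types are
 * treated as the same device.
 */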
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	bool ret = true;

	if (a->type != b->type)
		return false;

	switch (a->type) {
	case PCI_ADDR:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			ret = false;
		break;
	default:
		break;
	}

	return ret;
}

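/*
 * Register a vDPA device: record its address and driver callbacks in the
 * first free slot of vdpa_devices[]. Fails with -1 if either argument is
 * NULL, the table is full, or a device with the same address is already
 * registered; otherwise the slot index is returned as the device id.
 */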
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || addr == NULL || ops == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	snprintf(device_name, sizeof(device_name), "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (!dev)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}

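/*
 * Unregister the device with id 'did': free its descriptor and clear the
 * slot. Returns did on success, -1 if the id is out of range or unused.
 */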
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}

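/*
 * Look up the device id for a given address. Returns -1 if the address is
 * NULL or no registered device matches it.
 */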
int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	if (addr == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}

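/*
 * Return the device descriptor for id 'did', or NULL if the id is out of
 * range or the slot is empty.
 */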
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}

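/* Return the number of currently registered vDPA devices. */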
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}

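/*
 * Relay used-ring updates from the driver-provided mirror vring (vring_m)
 * to the guest-visible used ring of split virtqueue 'qid'. For every new
 * used entry, the descriptor chain is walked (indirect tables are
 * translated and, if partially mapped, copied) and each DMA-writeable
 * buffer is logged as dirty so its pages are tracked for live migration.
 * Returns the number of entries relayed, or -1 on error; packed
 * virtqueues are not supported.
 */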
int __rte_experimental
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write(dev, desc.addr, desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
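
/*
 * Illustrative sketch of how a vDPA driver might use the registration API
 * above, kept under #if 0 so it does not affect the build. Everything
 * prefixed with "example_" is hypothetical: the PCI address values are
 * arbitrary and the rte_vdpa_dev_ops callbacks are driver specific and
 * assumed to be filled in elsewhere.
 */
#if 0
static struct rte_vdpa_dev_ops example_vdpa_ops; /* driver callbacks go here */

static int
example_vdpa_attach(void)
{
	struct rte_vdpa_dev_addr addr = {
		.type = PCI_ADDR,
		.pci_addr = {
			.domain = 0,
			.bus = 0x5e,
			.devid = 0,
			.function = 0,
		},
	};
	int did;

	/* Registration rejects duplicates and a full table; on success the
	 * slot index becomes the device id used by the rest of the library.
	 */
	did = rte_vdpa_register_device(&addr, &example_vdpa_ops);
	if (did < 0)
		return -1;

	/* The id can be recovered later from the address alone. */
	if (rte_vdpa_find_device_id(&addr) != did)
		return -1;

	/* ... use rte_vdpa_get_device(did)->ops as needed ... */

	return rte_vdpa_unregister_device(did);
}
#endif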