/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* Todo: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer;
    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_GUEST_ANNOUNCE,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

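/*
 * Ask the backend for its virtio device id and reject anything that is not a
 * network device (VIRTIO_ID_NET).
 */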
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

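/*
 * Allocate and initialize the vhost_net state for the queues backing this
 * net client, then verify the device really is a virtio-net device.
 */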
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

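/*
 * Tear down one net client: free the shadow CVQ buffers, delete the iova
 * tree on the last queues of the device, and release the vhost_net state
 * and the device fd.
 */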
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev = &s->vhost_net->dev;

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->cvq_cmd_in_buffer);
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

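/*
 * vhost-vdpa plugs into the virtio-net device model only; reject any other
 * peer device type.
 */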
static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return 0;
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

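/*
 * Unmap a shadow CVQ buffer: look up the mapping by its translated (QEMU VA)
 * address, unmap it from the device and drop it from the iova tree.
 */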
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
                           !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

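/* Counterpart of vhost_vdpa_net_cvq_start(): unmap both shadow CVQ buffers. */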
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer);
    }
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Do not forward commands not supported by SVQ. Otherwise, the device could
 * accept them and qemu would not know how to update the device model.
 */
static bool vhost_vdpa_net_cvq_validate_cmd(const void *out_buf, size_t len)
{
    struct virtio_net_ctrl_hdr ctrl;

    if (unlikely(len < sizeof(ctrl))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid length of out buffer %zu\n", __func__, len);
        return false;
    }

    memcpy(&ctrl, out_buf, sizeof(ctrl));
    switch (ctrl.class) {
    case VIRTIO_NET_CTRL_MAC:
        switch (ctrl.cmd) {
        case VIRTIO_NET_CTRL_MAC_ADDR_SET:
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mac cmd %u\n",
                          __func__, ctrl.cmd);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n",
                      __func__, ctrl.class);
    }

    return false;
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len, dev_written;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer sent to the device */
    const struct iovec dev_in = {
        .iov_base = s->cvq_cmd_in_buffer,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    int r = -EINVAL;
    bool ok;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    ok = vhost_vdpa_net_cvq_validate_cmd(s->cvq_cmd_out_buffer, out.iov_len);
    if (unlikely(!ok)) {
        goto out;
    }

    r = vhost_svq_add(svq, &out, 1, &dev_in, 1, elem);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        goto out;
    }

    /*
     * We can poll here since we've had the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when the BQL is released.
     */
    dev_written = vhost_svq_poll(svq);
    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
    if (status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return r;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

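/*
 * Build one net client: a datapath client for a queue pair, or a control
 * client for the CVQ. For the CVQ, also allocate the page-aligned shadow
 * command buffers and install the SVQ control handler.
 */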
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           VhostIOVATree *iova_tree)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    snprintf(nc->info_str, sizeof(nc->info_str), "%s", TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_tree = iova_tree;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                              vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(),
                                             vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        error_setg(&s->vhost_vdpa.migration_blocker,
                   "Migration disabled: vhost-vdpa uses CVQ.");
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

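/* Fetch the usable IOVA range from the device; returns 0 or a negative errno. */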
static int vhost_vdpa_get_iova_range(int fd,
                                     struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

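/* Query the device feature bits, setting errp on failure. */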
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

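/*
 * Return the number of queue pairs the device supports: read from the config
 * space when VIRTIO_NET_F_MQ is offered, 1 otherwise. Also reports whether
 * the device exposes a control virtqueue.
 */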
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

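/*
 * Entry point for "-netdev vhost-vdpa": opens the vdpa character device,
 * creates one net client per queue pair plus an optional control-queue
 * client, and, when x-svq is on, builds the iova tree they share.
 *
 * Illustrative invocation (the device node path is just an example):
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 */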
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    g_autoptr(VhostIOVATree) iova_tree = NULL;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev) {
        error_setg(errp, "vdpa character device not specified with vhostdev");
        return -1;
    }

    vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
    if (vdpa_device_fd == -1) {
        return -errno;
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    if (opts->x_svq) {
        struct vhost_vdpa_iova_range iova_range;

        uint64_t invalid_dev_features =
            features & ~vdpa_svq_device_features &
            /* Transport features are all accepted at this point */
            ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                             VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

        if (invalid_dev_features) {
            error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                       invalid_dev_features);
            goto err_svq;
        }

        vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_tree);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_tree);
        if (!nc) {
            goto err;
        }
    }

    /* iova_tree ownership belongs to last NetClientState */
    g_steal_pointer(&iova_tree);
    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

err_svq:
    qemu_close(vdpa_device_fd);

    return -1;
}