/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* device_id is not valid unless the ioctl succeeded */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

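/*
 * Switch the data virtqueues between passthrough and shadow (SVQ) mode by
 * stopping and restarting the vhost device, so that QEMU can log guest
 * memory writes while migration is active.
 */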
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
          n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start checks whether migration is setup_or_active to enable SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

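/* Enable SVQ on migration setup; disable it again if migration fails */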
static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

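/*
 * Per-queue start callback: decide whether this data vq runs in shadow mode
 * and, for queues other than the first, reuse the IOVA tree allocated by the
 * first queue pair.
 */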
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

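/*
 * Ask the device which virtqueue group vq_index belongs to. Returns the
 * group number on success, or a negative errno (also reported through errp)
 * on failure.
 */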
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

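/* Bind the virtqueue group vq_group to the address space asid_num */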
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

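/*
 * Start callback of the control virtqueue client: decide whether CVQ runs
 * in shadow mode, move it to its own ASID when the device supports that,
 * and map the shadow control buffers into the device's IOVA space.
 */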
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

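/*
 * Send a control command through the shadow control virtqueue and poll for
 * the device's answer. Returns the number of bytes the device wrote to the
 * in buffer, or a negative error.
 */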
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;
    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

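/*
 * Replay the virtio-net device state (MAC, MQ) through CVQ after the device
 * is started with SVQ enabled, e.g. on the migration destination.
 */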
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd: The vdpa device fd
 * @features: Features offered by the device.
 * @cvq_index: The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

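/*
 * Create a vhost-vdpa net client for one queue pair. Data queues use
 * net_vhost_vdpa_info; the control queue pair uses net_vhost_vdpa_cvq_info
 * and allocates shadow buffers for CVQ commands.
 */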
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

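/*
 * Read max_virtqueue_pairs from the device config space when _F_MQ is
 * offered, and report through has_cvq whether the device has a control
 * virtqueue. Returns the number of queue pairs, or a negative error.
 */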
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            /* ioctl() returns -1 on failure; keep the value negative */
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}