1 /*
2 * vhost-vdpa.c
3 *
4 * Copyright(c) 2017-2018 Intel Corporation.
5 * Copyright(c) 2020 Red Hat, Inc.
6 *
7 * This work is licensed under the terms of the GNU GPL, version 2 or later.
8 * See the COPYING file in the top-level directory.
9 *
10 */
11
12 #include "qemu/osdep.h"
13 #include "clients.h"
14 #include "hw/virtio/virtio-net.h"
15 #include "net/vhost_net.h"
16 #include "net/vhost-vdpa.h"
17 #include "hw/virtio/vhost-vdpa.h"
18 #include "qemu/config-file.h"
19 #include "qemu/error-report.h"
20 #include "qemu/log.h"
21 #include "qemu/memalign.h"
22 #include "qemu/option.h"
23 #include "qapi/error.h"
24 #include <linux/vhost.h>
25 #include <sys/ioctl.h>
26 #include <err.h>
27 #include "standard-headers/linux/virtio_net.h"
28 #include "monitor/monitor.h"
29 #include "migration/migration.h"
30 #include "migration/misc.h"
31 #include "hw/virtio/vhost.h"
32
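/*
 * Per-queue-pair vhost-vdpa net client state. One of these is created for
 * each data queue pair and, when the device offers VIRTIO_NET_F_CTRL_VQ,
 * one more for the control virtqueue; the CVQ shadow buffers below are only
 * allocated for that CVQ client (see net_vhost_vdpa_init()).
 */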
33 /* TODO: need to add multiqueue support here */
34 typedef struct VhostVDPAState {
35 NetClientState nc;
36 struct vhost_vdpa vhost_vdpa;
37 Notifier migration_state;
38 VHostNetState *vhost_net;
39
40 /* Control commands shadow buffers */
41 void *cvq_cmd_out_buffer;
42 virtio_net_ctrl_ack *status;
43
44 /* The device always has SVQ enabled */
45 bool always_svq;
46
47 /* The device can isolate CVQ in its own ASID */
48 bool cvq_isolated;
49
50 bool started;
51 } VhostVDPAState;
52
53 /*
54 * The array is sorted alphabetically in ascending order,
55 * with the exception of VHOST_INVALID_FEATURE_BIT,
56 * which should always be the last entry.
57 */
58 const int vdpa_feature_bits[] = {
59 VIRTIO_F_ANY_LAYOUT,
60 VIRTIO_F_IOMMU_PLATFORM,
61 VIRTIO_F_NOTIFY_ON_EMPTY,
62 VIRTIO_F_RING_PACKED,
63 VIRTIO_F_RING_RESET,
64 VIRTIO_F_VERSION_1,
65 VIRTIO_NET_F_CSUM,
66 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
67 VIRTIO_NET_F_CTRL_MAC_ADDR,
68 VIRTIO_NET_F_CTRL_RX,
69 VIRTIO_NET_F_CTRL_RX_EXTRA,
70 VIRTIO_NET_F_CTRL_VLAN,
71 VIRTIO_NET_F_CTRL_VQ,
72 VIRTIO_NET_F_GSO,
73 VIRTIO_NET_F_GUEST_CSUM,
74 VIRTIO_NET_F_GUEST_ECN,
75 VIRTIO_NET_F_GUEST_TSO4,
76 VIRTIO_NET_F_GUEST_TSO6,
77 VIRTIO_NET_F_GUEST_UFO,
78 VIRTIO_NET_F_GUEST_USO4,
79 VIRTIO_NET_F_GUEST_USO6,
80 VIRTIO_NET_F_HASH_REPORT,
81 VIRTIO_NET_F_HOST_ECN,
82 VIRTIO_NET_F_HOST_TSO4,
83 VIRTIO_NET_F_HOST_TSO6,
84 VIRTIO_NET_F_HOST_UFO,
85 VIRTIO_NET_F_HOST_USO,
86 VIRTIO_NET_F_MQ,
87 VIRTIO_NET_F_MRG_RXBUF,
88 VIRTIO_NET_F_MTU,
89 VIRTIO_NET_F_RSS,
90 VIRTIO_NET_F_STATUS,
91 VIRTIO_RING_F_EVENT_IDX,
92 VIRTIO_RING_F_INDIRECT_DESC,
93
94 /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
95 VHOST_INVALID_FEATURE_BIT
96 };
97
98 /** Supported device-specific feature bits with SVQ */
99 static const uint64_t vdpa_svq_device_features =
100 BIT_ULL(VIRTIO_NET_F_CSUM) |
101 BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
102 BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
103 BIT_ULL(VIRTIO_NET_F_MTU) |
104 BIT_ULL(VIRTIO_NET_F_MAC) |
105 BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
106 BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
107 BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
108 BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
109 BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
110 BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
111 BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
112 BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
113 BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
114 BIT_ULL(VIRTIO_NET_F_STATUS) |
115 BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
116 BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
117 BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
118 BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
119 BIT_ULL(VIRTIO_NET_F_MQ) |
120 BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
121 BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
122 /* VHOST_F_LOG_ALL is exposed by SVQ */
123 BIT_ULL(VHOST_F_LOG_ALL) |
124 BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
125 BIT_ULL(VIRTIO_NET_F_STANDBY) |
126 BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
127
128 #define VHOST_VDPA_NET_CVQ_ASID 1
129
130 VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
131 {
132 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
133 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
134 return s->vhost_net;
135 }
136
137 static size_t vhost_vdpa_net_cvq_cmd_len(void)
138 {
139 /*
140 * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
141 * The in buffer is always 1 byte, so it should fit here.
142 */
143 return sizeof(struct virtio_net_ctrl_hdr) +
144 2 * sizeof(struct virtio_net_ctrl_mac) +
145 MAC_TABLE_ENTRIES * ETH_ALEN;
146 }
147
148 static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
149 {
150 return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
151 }
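/*
 * Worked example, assuming the usual definitions (MAC_TABLE_ENTRIES = 64,
 * ETH_ALEN = 6, a 2-byte struct virtio_net_ctrl_hdr and a 4-byte
 * struct virtio_net_ctrl_mac header): vhost_vdpa_net_cvq_cmd_len() is
 * 2 + 2 * 4 + 64 * 6 = 394 bytes, which vhost_vdpa_net_cvq_cmd_page_len()
 * rounds up to a single host page (4096 bytes on most hosts).
 */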
152
153 static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
154 {
155 uint64_t invalid_dev_features =
156 features & ~vdpa_svq_device_features &
157 /* Transport features are all accepted at this point */
158 ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
159 VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
160
161 if (invalid_dev_features) {
162 error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
163 invalid_dev_features);
164 return false;
165 }
166
167 return vhost_svq_valid_features(features, errp);
168 }
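/*
 * For example, a device offering VIRTIO_NET_F_HASH_REPORT would be rejected
 * here, as that bit is not part of vdpa_svq_device_features; transport
 * feature bits (VIRTIO_TRANSPORT_F_START..VIRTIO_TRANSPORT_F_END) are
 * masked out above and left for vhost_svq_valid_features() to validate.
 */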
169
170 static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
171 {
172 uint32_t device_id;
173 int ret;
174 struct vhost_dev *hdev;
175
176 hdev = (struct vhost_dev *)&net->dev;
177 ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
178 if (device_id != VIRTIO_ID_NET) {
179 return -ENOTSUP;
180 }
181 return ret;
182 }
183
184 static int vhost_vdpa_add(NetClientState *ncs, void *be,
185 int queue_pair_index, int nvqs)
186 {
187 VhostNetOptions options;
188 struct vhost_net *net = NULL;
189 VhostVDPAState *s;
190 int ret;
191
192 options.backend_type = VHOST_BACKEND_TYPE_VDPA;
193 assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
194 s = DO_UPCAST(VhostVDPAState, nc, ncs);
195 options.net_backend = ncs;
196 options.opaque = be;
197 options.busyloop_timeout = 0;
198 options.nvqs = nvqs;
199
200 net = vhost_net_init(&options);
201 if (!net) {
202 error_report("failed to init vhost_net for queue");
203 goto err_init;
204 }
205 s->vhost_net = net;
206 ret = vhost_vdpa_net_check_device_id(net);
207 if (ret) {
208 goto err_check;
209 }
210 return 0;
211 err_check:
212 vhost_net_cleanup(net);
213 g_free(net);
214 err_init:
215 return -1;
216 }
217
218 static void vhost_vdpa_cleanup(NetClientState *nc)
219 {
220 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
221
222 /*
223 * If a peer NIC is attached, do not cleanup anything.
224 * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
225 * when the guest is shutting down.
226 */
227 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
228 return;
229 }
230 munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
231 munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
232 if (s->vhost_net) {
233 vhost_net_cleanup(s->vhost_net);
234 g_free(s->vhost_net);
235 s->vhost_net = NULL;
236 }
237 if (s->vhost_vdpa.device_fd >= 0) {
238 qemu_close(s->vhost_vdpa.device_fd);
239 s->vhost_vdpa.device_fd = -1;
240 }
241 }
242
243 static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
244 {
245 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
246
247 return true;
248 }
249
250 static bool vhost_vdpa_has_ufo(NetClientState *nc)
251 {
252 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
253 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
254 uint64_t features = 0;
255 features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
256 features = vhost_net_get_features(s->vhost_net, features);
257 return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
258
259 }
260
261 static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
262 Error **errp)
263 {
264 const char *driver = object_class_get_name(oc);
265
266 if (!g_str_has_prefix(driver, "virtio-net-")) {
267 error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
268 return false;
269 }
270
271 return true;
272 }
273
274 /** Dummy receive in case qemu falls back to userland tap networking */
275 static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
276 size_t size)
277 {
278 return size;
279 }
280
281 /** From any vdpa net client, get the netclient of the first queue pair */
282 static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
283 {
284 NICState *nic = qemu_get_nic(s->nc.peer);
285 NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
286
287 return DO_UPCAST(VhostVDPAState, nc, nc0);
288 }
289
290 static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
291 {
292 struct vhost_vdpa *v = &s->vhost_vdpa;
293 VirtIONet *n;
294 VirtIODevice *vdev;
295 int data_queue_pairs, cvq, r;
296
297 /* We are only called on the first data vq and only if x-svq is not set */
298 if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
299 return;
300 }
301
302 vdev = v->dev->vdev;
303 n = VIRTIO_NET(vdev);
304 if (!n->vhost_started) {
305 return;
306 }
307
308 data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
309 cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
310 n->max_ncs - n->max_queue_pairs : 0;
311 /*
312 * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
313 * in the future and resume the device if read-only operations between
314 * suspend and reset goes wrong.
315 */
316 vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
317
318 /* Start will check whether migration is in setup or active state to decide whether to configure SVQ */
319 r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
320 if (unlikely(r < 0)) {
321 error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
322 }
323 }
324
325 static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
326 {
327 MigrationState *migration = data;
328 VhostVDPAState *s = container_of(notifier, VhostVDPAState,
329 migration_state);
330
331 if (migration_in_setup(migration)) {
332 vhost_vdpa_net_log_global_enable(s, true);
333 } else if (migration_has_failed(migration)) {
334 vhost_vdpa_net_log_global_enable(s, false);
335 }
336 }
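/*
 * Flow sketch: when migration enters the setup state, the notifier above
 * restarts vhost-net with shadow virtqueues enabled so SVQ can expose
 * VHOST_F_LOG_ALL and track dirty pages; if the migration later fails, the
 * same path switches the device back to passthrough operation.
 */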
337
338 static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
339 {
340 struct vhost_vdpa *v = &s->vhost_vdpa;
341
342 add_migration_state_change_notifier(&s->migration_state);
343 if (v->shadow_vqs_enabled) {
344 v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
345 v->iova_range.last);
346 }
347 }
348
349 static int vhost_vdpa_net_data_start(NetClientState *nc)
350 {
351 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
352 struct vhost_vdpa *v = &s->vhost_vdpa;
353
354 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
355
356 if (s->always_svq ||
357 migration_is_setup_or_active(migrate_get_current()->state)) {
358 v->shadow_vqs_enabled = true;
359 v->shadow_data = true;
360 } else {
361 v->shadow_vqs_enabled = false;
362 v->shadow_data = false;
363 }
364
365 if (v->index == 0) {
366 vhost_vdpa_net_data_start_first(s);
367 return 0;
368 }
369
370 if (v->shadow_vqs_enabled) {
371 VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
372 v->iova_tree = s0->vhost_vdpa.iova_tree;
373 }
374
375 return 0;
376 }
377
378 static int vhost_vdpa_net_data_load(NetClientState *nc)
379 {
380 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
381 struct vhost_vdpa *v = &s->vhost_vdpa;
382 bool has_cvq = v->dev->vq_index_end % 2;
383
384 if (has_cvq) {
385 return 0;
386 }
387
388 for (int i = 0; i < v->dev->nvqs; ++i) {
389 vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
390 }
391 return 0;
392 }
393
394 static void vhost_vdpa_net_client_stop(NetClientState *nc)
395 {
396 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
397 struct vhost_dev *dev;
398
399 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
400
401 if (s->vhost_vdpa.index == 0) {
402 remove_migration_state_change_notifier(&s->migration_state);
403 }
404
405 dev = s->vhost_vdpa.dev;
406 if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
407 g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
408 }
409 }
410
411 static NetClientInfo net_vhost_vdpa_info = {
412 .type = NET_CLIENT_DRIVER_VHOST_VDPA,
413 .size = sizeof(VhostVDPAState),
414 .receive = vhost_vdpa_receive,
415 .start = vhost_vdpa_net_data_start,
416 .load = vhost_vdpa_net_data_load,
417 .stop = vhost_vdpa_net_client_stop,
418 .cleanup = vhost_vdpa_cleanup,
419 .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
420 .has_ufo = vhost_vdpa_has_ufo,
421 .check_peer_type = vhost_vdpa_check_peer_type,
422 };
423
424 static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
425 Error **errp)
426 {
427 struct vhost_vring_state state = {
428 .index = vq_index,
429 };
430 int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
431
432 if (unlikely(r < 0)) {
433 r = -errno;
434 error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
435 return r;
436 }
437
438 return state.num;
439 }
440
441 static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
442 unsigned vq_group,
443 unsigned asid_num)
444 {
445 struct vhost_vring_state asid = {
446 .index = vq_group,
447 .num = asid_num,
448 };
449 int r;
450
451 r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
452 if (unlikely(r < 0)) {
453 error_report("Can't set vq group %u asid %u, errno=%d (%s)",
454 asid.index, asid.num, errno, g_strerror(errno));
455 }
456 return r;
457 }
458
459 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
460 {
461 VhostIOVATree *tree = v->iova_tree;
462 DMAMap needle = {
463 /*
464 * No need to specify size or to look for more translations since
465 * this contiguous chunk was allocated by us.
466 */
467 .translated_addr = (hwaddr)(uintptr_t)addr,
468 };
469 const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
470 int r;
471
472 if (unlikely(!map)) {
473 error_report("Cannot locate expected map");
474 return;
475 }
476
477 r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
478 if (unlikely(r != 0)) {
479 error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
480 }
481
482 vhost_iova_tree_remove(tree, *map);
483 }
484
485 /** Map CVQ buffer. */
486 static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
487 bool write)
488 {
489 DMAMap map = {};
490 int r;
491
492 map.translated_addr = (hwaddr)(uintptr_t)buf;
493 map.size = size - 1;
494 map.perm = write ? IOMMU_RW : IOMMU_RO;
495 r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
496 if (unlikely(r != IOVA_OK)) {
497 error_report("Cannot map injected element");
498 return r;
499 }
500
501 r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
502 vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
503 if (unlikely(r < 0)) {
504 goto dma_map_err;
505 }
506
507 return 0;
508
509 dma_map_err:
510 vhost_iova_tree_remove(v->iova_tree, map);
511 return r;
512 }
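/*
 * Note on sizes: DMAMap.size holds an inclusive length (bytes - 1), which
 * is why vhost_vdpa_cvq_map_buf() stores "size - 1" in the tree while
 * vhost_vdpa_cvq_unmap_buf() unmaps "map->size + 1" bytes.
 */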
513
514 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
515 {
516 VhostVDPAState *s, *s0;
517 struct vhost_vdpa *v;
518 int64_t cvq_group;
519 int r;
520 Error *err = NULL;
521
522 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
523
524 s = DO_UPCAST(VhostVDPAState, nc, nc);
525 v = &s->vhost_vdpa;
526
527 s0 = vhost_vdpa_net_first_nc_vdpa(s);
528 v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
529 v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
530 s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
531
532 if (s->vhost_vdpa.shadow_data) {
533 /* SVQ is already configured for all virtqueues */
534 goto out;
535 }
536
537 /*
538 * If we return early in these cases, SVQ will not be enabled. Migration
539 * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
540 */
541 if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
542 return 0;
543 }
544
545 if (!s->cvq_isolated) {
546 return 0;
547 }
548
549 cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
550 v->dev->vq_index_end - 1,
551 &err);
552 if (unlikely(cvq_group < 0)) {
553 error_report_err(err);
554 return cvq_group;
555 }
556
557 r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
558 if (unlikely(r < 0)) {
559 return r;
560 }
561
562 v->shadow_vqs_enabled = true;
563 s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
564
565 out:
566 if (!s->vhost_vdpa.shadow_vqs_enabled) {
567 return 0;
568 }
569
570 if (s0->vhost_vdpa.iova_tree) {
571 /*
572 * SVQ is already configured for all virtqueues. Reuse IOVA tree for
573 * simplicity, whether CVQ shares ASID with guest or not, because:
574 * - The memory listener needs access to guest's memory addresses allocated
575 * in the IOVA tree.
576 * - There should be plenty of IOVA address space for both ASIDs not to
577 * worry about collisions between them. Guest's translations are
578 * still validated with virtio virtqueue_pop so there is no risk for
579 * the guest to access memory that it shouldn't.
580 *
581 * Allocating an IOVA tree per ASID is doable but it complicates the
582 * code and it is not worth it for the moment.
583 */
584 v->iova_tree = s0->vhost_vdpa.iova_tree;
585 } else {
586 v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
587 v->iova_range.last);
588 }
589
590 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
591 vhost_vdpa_net_cvq_cmd_page_len(), false);
592 if (unlikely(r < 0)) {
593 return r;
594 }
595
596 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
597 vhost_vdpa_net_cvq_cmd_page_len(), true);
598 if (unlikely(r < 0)) {
599 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
600 }
601
602 return r;
603 }
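/*
 * Decision summary for the function above: if the data virtqueues are
 * already shadowed (x-svq or an ongoing migration), CVQ simply joins them.
 * Otherwise CVQ is shadowed on its own only when the device places it in a
 * separate group (s->cvq_isolated), letting QEMU move it to
 * VHOST_VDPA_NET_CVQ_ASID and intercept control commands without touching
 * the guest-visible data path.
 */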
604
605 static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
606 {
607 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
608
609 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
610
611 if (s->vhost_vdpa.shadow_vqs_enabled) {
612 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
613 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
614 }
615
616 vhost_vdpa_net_client_stop(nc);
617 }
618
619 static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
620 size_t in_len)
621 {
622 /* Buffers for the device */
623 const struct iovec out = {
624 .iov_base = s->cvq_cmd_out_buffer,
625 .iov_len = out_len,
626 };
627 const struct iovec in = {
628 .iov_base = s->status,
629 .iov_len = sizeof(virtio_net_ctrl_ack),
630 };
631 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
632 int r;
633
634 r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
635 if (unlikely(r != 0)) {
636 if (unlikely(r == -ENOSPC)) {
637 qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
638 __func__);
639 }
640 return r;
641 }
642
643 /*
644 * We can poll here since we've held the BQL from the time we sent the
645 * descriptor. Also, we need to take the answer before SVQ pulls it by
646 * itself, when the BQL is released.
647 */
648 return vhost_svq_poll(svq);
649 }
650
651 static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
652 uint8_t cmd, const struct iovec *data_sg,
653 size_t data_num)
654 {
655 const struct virtio_net_ctrl_hdr ctrl = {
656 .class = class,
657 .cmd = cmd,
658 };
659 size_t data_size = iov_size(data_sg, data_num);
660
661 assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
662
663 /* pack the CVQ command header */
664 memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
665
666 /* pack the CVQ command command-specific-data */
667 iov_to_buf(data_sg, data_num, 0,
668 s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
669
670 return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
671 sizeof(virtio_net_ctrl_ack));
672 }
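/*
 * Layout sketch of s->cvq_cmd_out_buffer for a MAC address set command,
 * assuming the standard 2-byte control header and a 6-byte MAC:
 *
 *   byte 0:     ctrl.class = VIRTIO_NET_CTRL_MAC
 *   byte 1:     ctrl.cmd   = VIRTIO_NET_CTRL_MAC_ADDR_SET
 *   bytes 2..7: MAC address copied from data_sg
 *
 * The device then writes a single virtio_net_ctrl_ack byte to s->status.
 */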
673
674 static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
675 {
676 if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
677 const struct iovec data = {
678 .iov_base = (void *)n->mac,
679 .iov_len = sizeof(n->mac),
680 };
681 ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
682 VIRTIO_NET_CTRL_MAC_ADDR_SET,
683 &data, 1);
684 if (unlikely(dev_written < 0)) {
685 return dev_written;
686 }
687 if (*s->status != VIRTIO_NET_OK) {
688 return -EIO;
689 }
690 }
691
692 /*
693 * According to VirtIO standard, "The device MUST have an
694 * empty MAC filtering table on reset.".
695 *
696 * Therefore, there is no need to send this CVQ command if the
697 * driver also sets an empty MAC filter table, which aligns with
698 * the device's defaults.
699 *
700 * Note that the device's defaults can mismatch the driver's
701 * configuration only at live migration.
702 */
703 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
704 n->mac_table.in_use == 0) {
705 return 0;
706 }
707
708 uint32_t uni_entries = n->mac_table.first_multi,
709 uni_macs_size = uni_entries * ETH_ALEN,
710 mul_entries = n->mac_table.in_use - uni_entries,
711 mul_macs_size = mul_entries * ETH_ALEN;
712 struct virtio_net_ctrl_mac uni = {
713 .entries = cpu_to_le32(uni_entries),
714 };
715 struct virtio_net_ctrl_mac mul = {
716 .entries = cpu_to_le32(mul_entries),
717 };
718 const struct iovec data[] = {
719 {
720 .iov_base = &uni,
721 .iov_len = sizeof(uni),
722 }, {
723 .iov_base = n->mac_table.macs,
724 .iov_len = uni_macs_size,
725 }, {
726 .iov_base = &mul,
727 .iov_len = sizeof(mul),
728 }, {
729 .iov_base = &n->mac_table.macs[uni_macs_size],
730 .iov_len = mul_macs_size,
731 },
732 };
733 ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
734 VIRTIO_NET_CTRL_MAC,
735 VIRTIO_NET_CTRL_MAC_TABLE_SET,
736 data, ARRAY_SIZE(data));
737 if (unlikely(dev_written < 0)) {
738 return dev_written;
739 }
740 if (*s->status != VIRTIO_NET_OK) {
741 return -EIO;
742 }
743
744 return 0;
745 }
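/*
 * Wire format of the VIRTIO_NET_CTRL_MAC_TABLE_SET payload built above: a
 * virtio_net_ctrl_mac header carrying the unicast entry count, that many
 * 6-byte MACs, then a second header with the multicast count and its MACs.
 * The four-element iovec lets vhost_vdpa_net_load_cmd() copy them into the
 * contiguous shadow buffer in one pass.
 */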
746
747 static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
748 const VirtIONet *n)
749 {
750 struct virtio_net_ctrl_mq mq;
751 ssize_t dev_written;
752
753 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
754 return 0;
755 }
756
757 mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
758 const struct iovec data = {
759 .iov_base = &mq,
760 .iov_len = sizeof(mq),
761 };
762 dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
763 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
764 &data, 1);
765 if (unlikely(dev_written < 0)) {
766 return dev_written;
767 }
768 if (*s->status != VIRTIO_NET_OK) {
769 return -EIO;
770 }
771
772 return 0;
773 }
774
775 static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
776 const VirtIONet *n)
777 {
778 uint64_t offloads;
779 ssize_t dev_written;
780
781 if (!virtio_vdev_has_feature(&n->parent_obj,
782 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
783 return 0;
784 }
785
786 if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
787 /*
788 * According to VirtIO standard, "Upon feature negotiation
789 * corresponding offload gets enabled to preserve
790 * backward compatibility.".
791 *
792 * Therefore, there is no need to send this CVQ command if the
793 * driver also enables all supported offloads, which aligns with
794 * the device's defaults.
795 *
796 * Note that the device's defaults can mismatch the driver's
797 * configuration only at live migration.
798 */
799 return 0;
800 }
801
802 offloads = cpu_to_le64(n->curr_guest_offloads);
803 const struct iovec data = {
804 .iov_base = &offloads,
805 .iov_len = sizeof(offloads),
806 };
807 dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
808 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
809 &data, 1);
810 if (unlikely(dev_written < 0)) {
811 return dev_written;
812 }
813 if (*s->status != VIRTIO_NET_OK) {
814 return -EIO;
815 }
816
817 return 0;
818 }
819
820 static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
821 uint8_t cmd,
822 uint8_t on)
823 {
824 const struct iovec data = {
825 .iov_base = &on,
826 .iov_len = sizeof(on),
827 };
828 return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
829 cmd, &data, 1);
830 }
831
832 static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
833 const VirtIONet *n)
834 {
835 ssize_t dev_written;
836
837 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
838 return 0;
839 }
840
841 /*
842 * According to virtio_net_reset(), device turns promiscuous mode
843 * on by default.
844 *
845 * Additionally, according to VirtIO standard, "Since there are
846 * no guarantees, it can use a hash filter or silently switch to
847 * allmulti or promiscuous mode if it is given too many addresses.".
848 * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
849 * non-multicast MAC addresses, indicating that promiscuous mode
850 * should be enabled.
851 *
852 * Therefore, QEMU should only send this CVQ command if the
853 * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
854 * which turns promiscuous mode off, different from the device's default.
855 *
856 * Note that the device's defaults can mismatch the driver's
857 * configuration only at live migration.
858 */
859 if (!n->mac_table.uni_overflow && !n->promisc) {
860 dev_written = vhost_vdpa_net_load_rx_mode(s,
861 VIRTIO_NET_CTRL_RX_PROMISC, 0);
862 if (unlikely(dev_written < 0)) {
863 return dev_written;
864 }
865 if (*s->status != VIRTIO_NET_OK) {
866 return -EIO;
867 }
868 }
869
870 /*
871 * According to virtio_net_reset(), device turns all-multicast mode
872 * off by default.
873 *
874 * According to VirtIO standard, "Since there are no guarantees,
875 * it can use a hash filter or silently switch to allmulti or
876 * promiscuous mode if it is given too many addresses.". QEMU marks
877 * `n->mac_table.multi_overflow` if the guest sets too many
878 * multicast MAC addresses.
879 *
880 * Therefore, QEMU should only send this CVQ command if the
881 * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
882 * which sets all-multicast mode on, different from the device's defaults.
883 *
884 * Note that the device's defaults can mismatch the driver's
885 * configuration only at live migration.
886 */
887 if (n->mac_table.multi_overflow || n->allmulti) {
888 dev_written = vhost_vdpa_net_load_rx_mode(s,
889 VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
890 if (unlikely(dev_written < 0)) {
891 return dev_written;
892 }
893 if (*s->status != VIRTIO_NET_OK) {
894 return -EIO;
895 }
896 }
897
898 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
899 return 0;
900 }
901
902 /*
903 * According to virtio_net_reset(), device turns all-unicast mode
904 * off by default.
905 *
906 * Therefore, QEMU should only send this CVQ command if the driver
907 * sets all-unicast mode on, different from the device's defaults.
908 *
909 * Note that the device's defaults can mismatch the driver's
910 * configuration only at live migration.
911 */
912 if (n->alluni) {
913 dev_written = vhost_vdpa_net_load_rx_mode(s,
914 VIRTIO_NET_CTRL_RX_ALLUNI, 1);
915 if (dev_written < 0) {
916 return dev_written;
917 }
918 if (*s->status != VIRTIO_NET_OK) {
919 return -EIO;
920 }
921 }
922
923 /*
924 * According to virtio_net_reset(), device turns non-multicast mode
925 * off by default.
926 *
927 * Therefore, QEMU should only send this CVQ command if the driver
928 * sets non-multicast mode on, different from the device's defaults.
929 *
930 * Note that the device's defaults can mismatch the driver's
931 * configuration only at live migration.
932 */
933 if (n->nomulti) {
934 dev_written = vhost_vdpa_net_load_rx_mode(s,
935 VIRTIO_NET_CTRL_RX_NOMULTI, 1);
936 if (dev_written < 0) {
937 return dev_written;
938 }
939 if (*s->status != VIRTIO_NET_OK) {
940 return -EIO;
941 }
942 }
943
944 /*
945 * According to virtio_net_reset(), device turns non-unicast mode
946 * off by default.
947 *
948 * Therefore, QEMU should only send this CVQ command if the driver
949 * sets non-unicast mode on, different from the device's defaults.
950 *
951 * Note that the device's defaults can mismatch the driver's
952 * configuration only at live migration.
953 */
954 if (n->nouni) {
955 dev_written = vhost_vdpa_net_load_rx_mode(s,
956 VIRTIO_NET_CTRL_RX_NOUNI, 1);
957 if (dev_written < 0) {
958 return dev_written;
959 }
960 if (*s->status != VIRTIO_NET_OK) {
961 return -EIO;
962 }
963 }
964
965 /*
966 * According to virtio_net_reset(), device turns non-broadcast mode
967 * off by default.
968 *
969 * Therefore, QEMU should only send this CVQ command if the driver
970 * sets non-broadcast mode on, different from the device's defaults.
971 *
972 * Note that the device's defaults can mismatch the driver's
973 * configuration only at live migration.
974 */
975 if (n->nobcast) {
976 dev_written = vhost_vdpa_net_load_rx_mode(s,
977 VIRTIO_NET_CTRL_RX_NOBCAST, 1);
978 if (dev_written < 0) {
979 return dev_written;
980 }
981 if (*s->status != VIRTIO_NET_OK) {
982 return -EIO;
983 }
984 }
985
986 return 0;
987 }
988
989 static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
990 const VirtIONet *n,
991 uint16_t vid)
992 {
993 const struct iovec data = {
994 .iov_base = &vid,
995 .iov_len = sizeof(vid),
996 };
997 ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
998 VIRTIO_NET_CTRL_VLAN_ADD,
999 &data, 1);
1000 if (unlikely(dev_written < 0)) {
1001 return dev_written;
1002 }
1003 if (unlikely(*s->status != VIRTIO_NET_OK)) {
1004 return -EIO;
1005 }
1006
1007 return 0;
1008 }
1009
1010 static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
1011 const VirtIONet *n)
1012 {
1013 int r;
1014
1015 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
1016 return 0;
1017 }
1018
1019 for (int i = 0; i < MAX_VLAN >> 5; i++) {
1020 for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
1021 if (n->vlans[i] & (1U << j)) {
1022 r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
1023 if (unlikely(r != 0)) {
1024 return r;
1025 }
1026 }
1027 }
1028 }
1029
1030 return 0;
1031 }
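/*
 * Worked example for the bitmap walk above: n->vlans[] stores one bit per
 * VLAN id, 32 ids per word. For vid 100, i = 100 >> 5 = 3 and
 * j = 100 & 0x1f = 4, so the bit tested is n->vlans[3] & (1 << 4) and the
 * id sent to the device is (i << 5) + j = 100.
 */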
1032
1033 static int vhost_vdpa_net_cvq_load(NetClientState *nc)
1034 {
1035 VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
1036 struct vhost_vdpa *v = &s->vhost_vdpa;
1037 const VirtIONet *n;
1038 int r;
1039
1040 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1041
1042 vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
1043
1044 if (v->shadow_vqs_enabled) {
1045 n = VIRTIO_NET(v->dev->vdev);
1046 r = vhost_vdpa_net_load_mac(s, n);
1047 if (unlikely(r < 0)) {
1048 return r;
1049 }
1050 r = vhost_vdpa_net_load_mq(s, n);
1051 if (unlikely(r)) {
1052 return r;
1053 }
1054 r = vhost_vdpa_net_load_offloads(s, n);
1055 if (unlikely(r)) {
1056 return r;
1057 }
1058 r = vhost_vdpa_net_load_rx(s, n);
1059 if (unlikely(r)) {
1060 return r;
1061 }
1062 r = vhost_vdpa_net_load_vlan(s, n);
1063 if (unlikely(r)) {
1064 return r;
1065 }
1066 }
1067
1068 for (int i = 0; i < v->dev->vq_index; ++i) {
1069 vhost_vdpa_set_vring_ready(v, i);
1070 }
1071
1072 return 0;
1073 }
1074
1075 static NetClientInfo net_vhost_vdpa_cvq_info = {
1076 .type = NET_CLIENT_DRIVER_VHOST_VDPA,
1077 .size = sizeof(VhostVDPAState),
1078 .receive = vhost_vdpa_receive,
1079 .start = vhost_vdpa_net_cvq_start,
1080 .load = vhost_vdpa_net_cvq_load,
1081 .stop = vhost_vdpa_net_cvq_stop,
1082 .cleanup = vhost_vdpa_cleanup,
1083 .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
1084 .has_ufo = vhost_vdpa_has_ufo,
1085 .check_peer_type = vhost_vdpa_check_peer_type,
1086 };
1087
1088 /*
1089 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
1090 * vdpa device.
1091 *
1092 * Considering that QEMU cannot send the entire filter table to the
1093 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
1094 * command to enable promiscuous mode to receive all packets,
1095 * according to VirtIO standard, "Since there are no guarantees,
1096 * it can use a hash filter or silently switch to allmulti or
1097 * promiscuous mode if it is given too many addresses.".
1098 *
1099 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
1100 * marks `n->mac_table.x_overflow` accordingly, receiving
1101 * (`MAC_TABLE_ENTRIES` + 1) or any larger number of non-multicast MAC
1102 * addresses has the same effect on the device model.
1103 * The same applies to multicast MAC addresses.
1104 *
1105 * Therefore, QEMU can provide the device model with a fake
1106 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
1107 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1108 * MAC addresses. This ensures that the device model marks
1109 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1110 * allowing all packets to be received, which aligns with the
1111 * state of the vdpa device.
1112 */
1113 static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1114 VirtQueueElement *elem,
1115 struct iovec *out)
1116 {
1117 struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1118 struct virtio_net_ctrl_hdr *hdr_ptr;
1119 uint32_t cursor;
1120 ssize_t r;
1121
1122 /* parse the non-multicast MAC address entries from CVQ command */
1123 cursor = sizeof(*hdr_ptr);
1124 r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1125 &mac_data, sizeof(mac_data));
1126 if (unlikely(r != sizeof(mac_data))) {
1127 /*
1128 * If the CVQ command is invalid, we should simulate the vdpa device
1129 * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1130 */
1131 *s->status = VIRTIO_NET_ERR;
1132 return sizeof(*s->status);
1133 }
1134 cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1135
1136 /* parse the multicast MAC address entries from CVQ command */
1137 r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1138 &mac_data, sizeof(mac_data));
1139 if (r != sizeof(mac_data)) {
1140 /*
1141 * If the CVQ command is invalid, we should simulate the vdpa device
1142 * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1143 */
1144 *s->status = VIRTIO_NET_ERR;
1145 return sizeof(*s->status);
1146 }
1147 cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1148
1149 /* validate the CVQ command */
1150 if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1151 /*
1152 * If the CVQ command is invalid, we should simulate the vdpa device
1153 * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1154 */
1155 *s->status = VIRTIO_NET_ERR;
1156 return sizeof(*s->status);
1157 }
1158
1159 /*
1160 * According to VirtIO standard, "Since there are no guarantees,
1161 * it can use a hash filter or silently switch to allmulti or
1162 * promiscuous mode if it is given too many addresses.".
1163 *
1164 * Therefore, considering that QEMU is unable to send the entire
1165 * filter table to the vdpa device, it should send the
1166 * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1167 */
1168 r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
1169 if (unlikely(r < 0)) {
1170 return r;
1171 }
1172 if (*s->status != VIRTIO_NET_OK) {
1173 return sizeof(*s->status);
1174 }
1175
1176 /*
1177 * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1178 * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1179 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1180 * multicast MAC addresses.
1181 *
1182 * By doing so, the device model can mark `n->mac_table.uni_overflow`
1183 * and `n->mac_table.multi_overflow`, enabling all packets to be
1184 * received, which aligns with the state of the vdpa device.
1185 */
1186 cursor = 0;
1187 uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1188 fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1189 fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1190 sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1191 sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1192
1193 assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1194 out->iov_len = fake_cvq_size;
1195
1196 /* pack the header for fake CVQ command */
1197 hdr_ptr = out->iov_base + cursor;
1198 hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1199 hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1200 cursor += sizeof(*hdr_ptr);
1201
1202 /*
1203 * Pack the non-multicast MAC addresses part for fake CVQ command.
1204 *
1205 * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1206 * addresses provided in CVQ command. Therefore, only the entries
1207 * field need to be prepared in the CVQ command.
1208 */
1209 mac_ptr = out->iov_base + cursor;
1210 mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1211 cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1212
1213 /*
1214 * Pack the multicast MAC addresses part for fake CVQ command.
1215 *
1216 * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1217 * addresses provided in the CVQ command. Therefore, only the entries
1218 * field needs to be prepared in the CVQ command.
1219 */
1220 mac_ptr = out->iov_base + cursor;
1221 mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1222
1223 /*
1224 * Simulate that QEMU has polled a used buffer from the vdpa device
1225 * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
1226 */
1227 return sizeof(*s->status);
1228 }
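/*
 * Size check for the fake command above, assuming MAC_TABLE_ENTRIES = 64
 * and ETH_ALEN = 6: fake_cvq_size is 2 + (4 + 65 * 6) + (4 + 65 * 6) = 790
 * bytes, comfortably below the one-page shadow buffer returned by
 * vhost_vdpa_net_cvq_cmd_page_len().
 */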
1229
1230 /**
1231 * Validate and copy control virtqueue commands.
1232 *
1233 * Following QEMU guidelines, we offer a copy of the buffers to the device to
1234 * prevent TOCTOU bugs.
1235 */
1236 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1237 VirtQueueElement *elem,
1238 void *opaque)
1239 {
1240 VhostVDPAState *s = opaque;
1241 size_t in_len;
1242 const struct virtio_net_ctrl_hdr *ctrl;
1243 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1244 /* Out buffer sent to both the vdpa device and the device model */
1245 struct iovec out = {
1246 .iov_base = s->cvq_cmd_out_buffer,
1247 };
1248 /* in buffer used for device model */
1249 const struct iovec in = {
1250 .iov_base = &status,
1251 .iov_len = sizeof(status),
1252 };
1253 ssize_t dev_written = -EINVAL;
1254
1255 out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1256 s->cvq_cmd_out_buffer,
1257 vhost_vdpa_net_cvq_cmd_page_len());
1258
1259 ctrl = s->cvq_cmd_out_buffer;
1260 if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1261 /*
1262 * Guest announce capability is emulated by qemu, so don't forward to
1263 * the device.
1264 */
1265 dev_written = sizeof(status);
1266 *s->status = VIRTIO_NET_OK;
1267 } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1268 ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1269 iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1270 /*
1271 * Due to the size limitation of the out buffer sent to the vdpa device,
1272 * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1273 * MAC addresses set by the driver for the filter table can cause
1274 * truncation of the CVQ command in QEMU. As a result, the vdpa device
1275 * rejects the flawed CVQ command.
1276 *
1277 * Therefore, QEMU must handle this situation instead of sending
1278 * the CVQ command directly.
1279 */
1280 dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1281 &out);
1282 if (unlikely(dev_written < 0)) {
1283 goto out;
1284 }
1285 } else {
1286 dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
1287 if (unlikely(dev_written < 0)) {
1288 goto out;
1289 }
1290 }
1291
1292 if (unlikely(dev_written < sizeof(status))) {
1293 error_report("Insufficient written data (%zu)", dev_written);
1294 goto out;
1295 }
1296
1297 if (*s->status != VIRTIO_NET_OK) {
1298 goto out;
1299 }
1300
1301 status = VIRTIO_NET_ERR;
1302 virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
1303 if (status != VIRTIO_NET_OK) {
1304 error_report("Bad CVQ processing in model");
1305 }
1306
1307 out:
1308 in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1309 sizeof(status));
1310 if (unlikely(in_len < sizeof(status))) {
1311 error_report("Bad device CVQ written length");
1312 }
1313 vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1314 /*
1315 * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1316 * the function successfully forwards the CVQ command, indicated
1317 * by a non-negative value of `dev_written`. Otherwise, it still
1318 * belongs to SVQ.
1319 * This function should only free the `elem` when it owns it.
1320 */
1321 if (dev_written >= 0) {
1322 g_free(elem);
1323 }
1324 return dev_written < 0 ? dev_written : 0;
1325 }
1326
1327 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1328 .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1329 };
1330
1331 /**
1332 * Probe if CVQ is isolated
1333 *
1334 * @device_fd The vdpa device fd
1335 * @features Features offered by the device.
1336 * @cvq_index The control vq pair index
1337 *
1338 * Returns <0 in case of failure, 0 if false and 1 if true.
1339 */
1340 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1341 int cvq_index, Error **errp)
1342 {
1343 uint64_t backend_features;
1344 int64_t cvq_group;
1345 uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1346 VIRTIO_CONFIG_S_DRIVER |
1347 VIRTIO_CONFIG_S_FEATURES_OK;
1348 int r;
1349
1350 ERRP_GUARD();
1351
1352 r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1353 if (unlikely(r < 0)) {
1354 error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1355 return r;
1356 }
1357
1358 if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1359 return 0;
1360 }
1361
1362 r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1363 if (unlikely(r)) {
1364 error_setg_errno(errp, errno, "Cannot set features");
goto out;
1365 }
1366
1367 r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1368 if (unlikely(r)) {
1369 error_setg_errno(errp, errno, "Cannot set device status");
1370 goto out;
1371 }
1372
1373 cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1374 if (unlikely(cvq_group < 0)) {
1375 if (cvq_group != -ENOTSUP) {
1376 r = cvq_group;
1377 goto out;
1378 }
1379
1380 /*
1381 * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
1382 * supports ASID even if the parent driver does not. The CVQ cannot be
1383 * isolated in this case.
1384 */
1385 error_free(*errp);
1386 *errp = NULL;
1387 r = 0;
1388 goto out;
1389 }
1390
1391 for (int i = 0; i < cvq_index; ++i) {
1392 int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1393 if (unlikely(group < 0)) {
1394 r = group;
1395 goto out;
1396 }
1397
1398 if (group == (int64_t)cvq_group) {
1399 r = 0;
1400 goto out;
1401 }
1402 }
1403
1404 r = 1;
1405
1406 out:
1407 status = 0;
1408 ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1409 return r;
1410 }
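/*
 * In short, the probe brings the device up to FEATURES_OK (but not
 * DRIVER_OK), asks for the vring group of the control virtqueue and
 * compares it with the group of every data virtqueue: CVQ counts as
 * isolated only if it sits in a group of its own, which is what later
 * allows moving it to VHOST_VDPA_NET_CVQ_ASID. The device is reset before
 * returning.
 */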
1411
1412 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1413 const char *device,
1414 const char *name,
1415 int vdpa_device_fd,
1416 int queue_pair_index,
1417 int nvqs,
1418 bool is_datapath,
1419 bool svq,
1420 struct vhost_vdpa_iova_range iova_range,
1421 uint64_t features,
1422 Error **errp)
1423 {
1424 NetClientState *nc = NULL;
1425 VhostVDPAState *s;
1426 int ret = 0;
1427 assert(name);
1428 int cvq_isolated;
1429
1430 if (is_datapath) {
1431 nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1432 name);
1433 } else {
1434 cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1435 queue_pair_index * 2,
1436 errp);
1437 if (unlikely(cvq_isolated < 0)) {
1438 return NULL;
1439 }
1440
1441 nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1442 device, name);
1443 }
1444 qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1445 s = DO_UPCAST(VhostVDPAState, nc, nc);
1446
1447 s->vhost_vdpa.device_fd = vdpa_device_fd;
1448 s->vhost_vdpa.index = queue_pair_index;
1449 s->always_svq = svq;
1450 s->migration_state.notify = vdpa_net_migration_state_notifier;
1451 s->vhost_vdpa.shadow_vqs_enabled = svq;
1452 s->vhost_vdpa.iova_range = iova_range;
1453 s->vhost_vdpa.shadow_data = svq;
1454 if (queue_pair_index == 0) {
1455 vhost_vdpa_net_valid_svq_features(features,
1456 &s->vhost_vdpa.migration_blocker);
1457 } else if (!is_datapath) {
1458 s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1459 PROT_READ | PROT_WRITE,
1460 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1461 s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1462 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1463 -1, 0);
1464
1465 s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1466 s->vhost_vdpa.shadow_vq_ops_opaque = s;
1467 s->cvq_isolated = cvq_isolated;
1468 }
1469 ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1470 if (ret) {
1471 qemu_del_net_client(nc);
1472 return NULL;
1473 }
1474 return nc;
1475 }
1476
1477 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1478 {
1479 int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1480 if (unlikely(ret < 0)) {
1481 error_setg_errno(errp, errno,
1482 "Failed to query features from vhost-vDPA device");
1483 }
1484 return ret;
1485 }
1486
1487 static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1488 int *has_cvq, Error **errp)
1489 {
1490 unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1491 g_autofree struct vhost_vdpa_config *config = NULL;
1492 __virtio16 *max_queue_pairs;
1493 int ret;
1494
1495 if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
1496 *has_cvq = 1;
1497 } else {
1498 *has_cvq = 0;
1499 }
1500
1501 if (features & (1 << VIRTIO_NET_F_MQ)) {
1502 config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1503 config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1504 config->len = sizeof(*max_queue_pairs);
1505
1506 ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1507 if (ret) {
1508 error_setg(errp, "Failed to get config from vhost-vDPA device");
1509 return ret;
1510 }
1511
1512 max_queue_pairs = (__virtio16 *)&config->buf;
1513
1514 return lduw_le_p(max_queue_pairs);
1515 }
1516
1517 return 1;
1518 }
1519
1520 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1521 NetClientState *peer, Error **errp)
1522 {
1523 const NetdevVhostVDPAOptions *opts;
1524 uint64_t features;
1525 int vdpa_device_fd;
1526 g_autofree NetClientState **ncs = NULL;
1527 struct vhost_vdpa_iova_range iova_range;
1528 NetClientState *nc;
1529 int queue_pairs, r, i = 0, has_cvq = 0;
1530
1531 assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1532 opts = &netdev->u.vhost_vdpa;
1533 if (!opts->vhostdev && !opts->vhostfd) {
1534 error_setg(errp,
1535 "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1536 return -1;
1537 }
1538
1539 if (opts->vhostdev && opts->vhostfd) {
1540 error_setg(errp,
1541 "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1542 return -1;
1543 }
1544
1545 if (opts->vhostdev) {
1546 vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1547 if (vdpa_device_fd == -1) {
1548 return -errno;
1549 }
1550 } else {
1551 /* has_vhostfd */
1552 vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1553 if (vdpa_device_fd == -1) {
1554 error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1555 return -1;
1556 }
1557 }
1558
1559 r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1560 if (unlikely(r < 0)) {
1561 goto err;
1562 }
1563
1564 queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1565 &has_cvq, errp);
1566 if (queue_pairs < 0) {
1567 qemu_close(vdpa_device_fd);
1568 return queue_pairs;
1569 }
1570
1571 r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1572 if (unlikely(r < 0)) {
1573 error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1574 strerror(-r));
1575 goto err;
1576 }
1577
1578 if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1579 goto err;
1580 }
1581
1582 ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1583
1584 for (i = 0; i < queue_pairs; i++) {
1585 ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1586 vdpa_device_fd, i, 2, true, opts->x_svq,
1587 iova_range, features, errp);
1588 if (!ncs[i]) {
1589 goto err;
}
1590 }
1591
1592 if (has_cvq) {
1593 nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1594 vdpa_device_fd, i, 1, false,
1595 opts->x_svq, iova_range, features, errp);
1596 if (!nc) {
1597 goto err;
}
1598 }
1599
1600 return 0;
1601
1602 err:
1603 if (i) {
1604 for (i--; i >= 0; i--) {
1605 qemu_del_net_client(ncs[i]);
1606 }
1607 }
1608
1609 qemu_close(vdpa_device_fd);
1610
1611 return -1;
1612 }
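/*
 * Usage sketch (an illustrative invocation, not part of this file): bind a
 * vDPA character device to a virtio-net frontend, optionally forcing shadow
 * virtqueues with the x-svq option parsed here:
 *
 *   qemu-system-x86_64 \
 *       -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *       -device virtio-net-pci,netdev=vdpa0
 *
 * /dev/vhost-vdpa-0 is an assumed device node created via the vdpa bus;
 * adding x-svq=on would make net_init_vhost_vdpa() enable SVQ
 * unconditionally.
 */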