/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

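/**
 * Check that the device features offered can be emulated by SVQ.
 *
 * Transport features are masked out of the check here because they are
 * validated separately by vhost_svq_valid_features().
 */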
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

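/** Verify that the vdpa backend exposes a network device (VIRTIO_ID_NET) */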
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

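/**
 * Initialize a vhost_net instance for this net client and verify that
 * the backend really is a virtio-net device.
 */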
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

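/**
 * Stop and restart vhost_net so that vhost_net_start() re-evaluates the
 * migration state and enables or disables SVQ (and with it
 * VHOST_F_LOG_ALL) accordingly.
 */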
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * Start will check migration setup_or_active to decide whether to
     * configure SVQ
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

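/**
 * Query which group a virtqueue belongs to through the
 * VHOST_VDPA_GET_VRING_GROUP ioctl.
 *
 * Returns the group number, or a negative errno on failure.
 */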
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

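/**
 * Attach a virtqueue group to an address space id (ASID) through the
 * VHOST_VDPA_SET_GROUP_ASID ioctl.
 */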
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * To allocate an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

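/**
 * Make the given out/in buffers available to the device through the
 * shadow CVQ. The buffers must already be mapped, and the caller is
 * responsible for polling for completion afterwards.
 */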
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg, size_t out_num,
                                      const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * The caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ polls by itself once the BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

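/**
 * Point the cursors back at the beginning of the shadow control
 * buffers. The cursors track where the next CVQ command and its status
 * reply are placed within those buffers.
 */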
static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

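/**
 * Pack a control command (header plus command-specific data) at the
 * cursors, send it through the shadow CVQ and poll for the device's
 * answer.
 *
 * Returns the number of bytes the device wrote to the status buffer,
 * or a negative value on failure.
 */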
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);
    struct iovec out, in;
    ssize_t r;

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, sizeof(ctrl) + data_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time
     * we sent the descriptor.
     */
    return vhost_vdpa_net_svq_poll(s, 1);
}

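/**
 * Replay the driver's MAC address and MAC filter table to the device,
 * skipping commands whose effect matches the device's post-reset
 * defaults.
 */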
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                                      VIRTIO_NET_CTRL_MAC,
                                                      VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                      &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset."
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                                  VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                                  data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                          VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility."
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

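/**
 * Send a single VIRTIO_NET_CTRL_RX mode command (promisc, allmulti,
 * alluni, ...) with the given on/off value.
 */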
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t dev_written;

    dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                          VIRTIO_NET_CTRL_RX,
                                          cmd, &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses."
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses." QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, different from the device's
     * defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                                  VIRTIO_NET_CTRL_VLAN,
                                                  VIRTIO_NET_CTRL_VLAN_ADD,
                                                  &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (unlikely(*s->status != VIRTIO_NET_OK)) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

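/**
 * Replay the device model state (MAC, MQ, offloads, RX mode and VLAN
 * filters) through the shadow CVQ, then mark the data vrings as ready.
 */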
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses."
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, the device model
 * behaves the same whether it receives (`MAC_TABLE_ENTRIES` + 1) or
 * more non-multicast MAC addresses. The same applies to multicast
 * MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses."
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device's used buffer for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for the vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out, &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've held the BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` it owns.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

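/**
 * Create a vhost-vdpa net client for one queue pair, or for the control
 * virtqueue when !is_datapath, probing CVQ isolation and allocating the
 * CVQ shadow buffers as needed.
 */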
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

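/**
 * Read max_virtqueue_pairs from the device config space when
 * VIRTIO_NET_F_MQ is offered, otherwise assume a single queue pair.
 * Whether a control virtqueue exists is reported through @has_cvq.
 */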
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

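/*
 * Entry point for -netdev type=vhost-vdpa. A typical invocation looks
 * like the following (the device node is only an example; actual paths
 * depend on the host's vdpa buses):
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *   -device virtio-net-pci,netdev=vdpa0
 */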
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}