/*
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
/* TODO: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;
} VhostVDPAState;
/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};
/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
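/*
 * Note (editorial): this is the address space id reserved for the CVQ when
 * it can be isolated from the data virtqueues. The data virtqueues stay in
 * the default address space, where guest physical addresses are mapped.
 */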
#define VHOST_VDPA_NET_CVQ_ASID 1
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
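/*
 * For reference (a sketch assuming the usual MAC_TABLE_ENTRIES of 64 from
 * virtio-net.h): 2 bytes of control header + 2 * 4 bytes of
 * virtio_net_ctrl_mac entry counters + 64 * 6 bytes of MAC addresses,
 * i.e. 394 bytes, which the helper below rounds up to one host page.
 */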
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        return -1;
    }
    s->vhost_net = net;

    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        vhost_net_cleanup(net);
        return -1;
    }

    return 0;
}
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}
static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}
static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}
static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}
/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}
/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}
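/*
 * Note (editorial): the first queue pair's VhostVDPAState holds the shared
 * per-device state (the migration notifier and, when SVQ is in use, the
 * IOVA tree), so other queue pairs reach it through the helper above.
 */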
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIODevice *vdev;
    VirtIONet *n;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
          n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}
static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}
static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}
static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
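/*
 * Note (editorial): net_vhost_vdpa_info drives the data virtqueue clients;
 * the control virtqueue uses the separate net_vhost_vdpa_cvq_info defined
 * further down.
 */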
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
*v
,
456 struct vhost_vring_state asid
= {
462 r
= ioctl(v
->device_fd
, VHOST_VDPA_SET_GROUP_ASID
, &asid
);
463 if (unlikely(r
< 0)) {
464 error_report("Can't set vq group %u asid %u, errno=%d (%s)",
465 asid
.index
, asid
.num
, errno
, g_strerror(errno
));
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}
/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        vhost_iova_tree_remove(v->iova_tree, map);
    }

    return r;
}
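/*
 * Note (editorial): mapping happens in two steps above:
 * vhost_iova_tree_map_alloc() reserves a free IOVA range in QEMU's tree,
 * and vhost_vdpa_dma_map() then programs the device with that
 * IOVA -> host buffer translation.
 */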
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. The migration
     * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * To allocate an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}
/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ pulls by itself when BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    return vhost_svq_poll(svq, cmds_in_flight);
}
static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
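/*
 * Note (editorial): the load path batches control commands. Each command is
 * packed at the current out_cursor and its one-byte ack is reserved at
 * in_cursor; both cursors advance until the shadow buffers or the SVQ run
 * out of room, at which point the pending commands are flushed and the
 * cursors reset via the helper above.
 */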
/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);

    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd,
                                       const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
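/*
 * Note (editorial): the iovec array packed above follows the
 * VIRTIO_NET_CTRL_MAC_TABLE_SET wire format from the virtio spec: a
 * virtio_net_ctrl_mac counter followed by the unicast MACs, then a second
 * counter followed by the multicast MACs.
 */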
static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure to match the layout of
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        },
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
*s
,
928 struct iovec
*out_cursor
,
929 struct iovec
*in_cursor
)
931 struct virtio_net_ctrl_mq mq
;
934 if (!virtio_vdev_has_feature(&n
->parent_obj
, VIRTIO_NET_F_MQ
)) {
938 mq
.virtqueue_pairs
= cpu_to_le16(n
->curr_queue_pairs
);
939 const struct iovec data
= {
941 .iov_len
= sizeof(mq
),
943 r
= vhost_vdpa_net_load_cmd(s
, out_cursor
, in_cursor
,
945 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
,
947 if (unlikely(r
< 0)) {
951 if (virtio_vdev_has_feature(&n
->parent_obj
, VIRTIO_NET_F_RSS
)) {
952 /* load the receive-side scaling state */
953 r
= vhost_vdpa_net_load_rss(s
, n
, out_cursor
, in_cursor
, true);
954 if (unlikely(r
< 0)) {
957 } else if (virtio_vdev_has_feature(&n
->parent_obj
,
958 VIRTIO_NET_F_HASH_REPORT
)) {
959 /* load the hash calculation state */
960 r
= vhost_vdpa_net_load_rss(s
, n
, out_cursor
, in_cursor
, false);
961 if (unlikely(r
< 0)) {
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
*s
,
1015 struct iovec
*out_cursor
,
1016 struct iovec
*in_cursor
,
1020 const struct iovec data
= {
1022 .iov_len
= sizeof(on
),
1026 r
= vhost_vdpa_net_load_cmd(s
, out_cursor
, in_cursor
,
1027 VIRTIO_NET_CTRL_RX
, cmd
, &data
, 1);
1028 if (unlikely(r
< 0)) {
1035 static int vhost_vdpa_net_load_rx(VhostVDPAState
*s
,
1037 struct iovec
*out_cursor
,
1038 struct iovec
*in_cursor
)
1042 if (!virtio_vdev_has_feature(&n
->parent_obj
, VIRTIO_NET_F_CTRL_RX
)) {
1047 * According to virtio_net_reset(), device turns promiscuous mode
1050 * Additionally, according to VirtIO standard, "Since there are
1051 * no guarantees, it can use a hash filter or silently switch to
1052 * allmulti or promiscuous mode if it is given too many addresses.".
1053 * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
1054 * non-multicast MAC addresses, indicating that promiscuous mode
1055 * should be enabled.
1057 * Therefore, QEMU should only send this CVQ command if the
1058 * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
1059 * which sets promiscuous mode on, different from the device's defaults.
1061 * Note that the device's defaults can mismatch the driver's
1062 * configuration only at live migration.
1064 if (!n
->mac_table
.uni_overflow
&& !n
->promisc
) {
1065 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1066 VIRTIO_NET_CTRL_RX_PROMISC
, 0);
1067 if (unlikely(r
< 0)) {
1073 * According to virtio_net_reset(), device turns all-multicast mode
1076 * According to VirtIO standard, "Since there are no guarantees,
1077 * it can use a hash filter or silently switch to allmulti or
1078 * promiscuous mode if it is given too many addresses.". QEMU marks
1079 * `n->mac_table.multi_overflow` if guest sets too many
1080 * non-multicast MAC addresses.
1082 * Therefore, QEMU should only send this CVQ command if the
1083 * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
1084 * which sets all-multicast mode on, different from the device's defaults.
1086 * Note that the device's defaults can mismatch the driver's
1087 * configuration only at live migration.
1089 if (n
->mac_table
.multi_overflow
|| n
->allmulti
) {
1090 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1091 VIRTIO_NET_CTRL_RX_ALLMULTI
, 1);
1092 if (unlikely(r
< 0)) {
1097 if (!virtio_vdev_has_feature(&n
->parent_obj
, VIRTIO_NET_F_CTRL_RX_EXTRA
)) {
1102 * According to virtio_net_reset(), device turns all-unicast mode
1105 * Therefore, QEMU should only send this CVQ command if the driver
1106 * sets all-unicast mode on, different from the device's defaults.
1108 * Note that the device's defaults can mismatch the driver's
1109 * configuration only at live migration.
1112 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1113 VIRTIO_NET_CTRL_RX_ALLUNI
, 1);
1120 * According to virtio_net_reset(), device turns non-multicast mode
1123 * Therefore, QEMU should only send this CVQ command if the driver
1124 * sets non-multicast mode on, different from the device's defaults.
1126 * Note that the device's defaults can mismatch the driver's
1127 * configuration only at live migration.
1130 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1131 VIRTIO_NET_CTRL_RX_NOMULTI
, 1);
1138 * According to virtio_net_reset(), device turns non-unicast mode
1141 * Therefore, QEMU should only send this CVQ command if the driver
1142 * sets non-unicast mode on, different from the device's defaults.
1144 * Note that the device's defaults can mismatch the driver's
1145 * configuration only at live migration.
1148 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1149 VIRTIO_NET_CTRL_RX_NOUNI
, 1);
1156 * According to virtio_net_reset(), device turns non-broadcast mode
1159 * Therefore, QEMU should only send this CVQ command if the driver
1160 * sets non-broadcast mode on, different from the device's defaults.
1162 * Note that the device's defaults can mismatch the driver's
1163 * configuration only at live migration.
1166 r
= vhost_vdpa_net_load_rx_mode(s
, out_cursor
, in_cursor
,
1167 VIRTIO_NET_CTRL_RX_NOBCAST
, 1);
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
*s
,
1199 struct iovec
*out_cursor
,
1200 struct iovec
*in_cursor
)
1204 if (!virtio_vdev_has_feature(&n
->parent_obj
, VIRTIO_NET_F_CTRL_VLAN
)) {
1208 for (int i
= 0; i
< MAX_VLAN
>> 5; i
++) {
1209 for (int j
= 0; n
->vlans
[i
] && j
<= 0x1f; j
++) {
1210 if (n
->vlans
[i
] & (1U << j
)) {
1211 r
= vhost_vdpa_net_load_single_vlan(s
, n
, out_cursor
,
1212 in_cursor
, (i
<< 5) + j
);
1213 if (unlikely(r
!= 0)) {
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    ssize_t r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all pending device's used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}
static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
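/*
 * Note (editorial): unlike the data queue clients, the CVQ client restores
 * device state in its .load callback: vhost_vdpa_net_cvq_load() replays the
 * MAC, MQ, offloads, RX mode and VLAN state through the shadow CVQ before
 * enabling the data virtqueues.
 */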
/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode.
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}
/*
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for the vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out,
                                                                  &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` that it owns.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}
static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
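/*
 * Note (editorial): the probe intentionally drives the device through a
 * partial feature negotiation (ACKNOWLEDGE/DRIVER, then FEATURES_OK) only
 * to be able to query vring groups, and always leaves the device reset by
 * writing a zero status before returning.
 */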
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);

    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
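/*
 * Note (editorial): typical usage, sketched for reference. A vDPA device is
 * exposed to the guest with something like:
 *
 *   qemu-system-x86_64 ... \
 *       -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *       -device virtio-net-pci,netdev=vdpa0
 *
 * net_init_vhost_vdpa() above is the entry point that parses this -netdev
 * option, opens the vhostdev (or reuses vhostfd) and creates one net client
 * per data queue pair, plus one for the CVQ when the device offers CTRL_VQ.
 */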