/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_NET_F_HASH_REPORT,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * In buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;

err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

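/*
 * Per queue pair start callback for the data virtqueues. Whether the shadow
 * virtqueue (SVQ) path is used is decided here: it is enabled either
 * unconditionally (x-svq) or on demand while a migration is being set up, so
 * SVQ can provide the VHOST_F_LOG_ALL dirty logging the device does not offer.
 */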
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

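/*
 * Query the group that virtqueue @vq_index belongs to through the
 * VHOST_VDPA_GET_VRING_GROUP ioctl. Returns the group number on success or a
 * negative errno on failure.
 */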
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

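/*
 * Bind virtqueue group @vq_group to address space @asid_num with the
 * VHOST_VDPA_SET_GROUP_ASID ioctl, so the shadow CVQ buffers can live in
 * their own address space.
 */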
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases, SVQ will not be enabled. The
     * migration will be blocked as long as vhost-vdpa backends do not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

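/*
 * Push one control command through the shadow control virtqueue and poll for
 * the device's ack. The command must already be in the shadow out buffer
 * (s->cvq_cmd_out_buffer); the ack is written by the device into s->status.
 */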
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
     * when BQL is released.
     */
    return vhost_svq_poll(svq);
}

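/*
 * Compose a control command (header plus payload) in the shadow out buffer
 * and submit it, returning the amount of data written back by the device or
 * a negative error.
 */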
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

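/*
 * NetClientInfo .load callback for the CVQ client: when SVQ is in use, replay
 * the state that was negotiated through CVQ (currently the MAC address and
 * the number of queue pairs) once the vhost device is started.
 */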
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        return r;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device features");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

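/*
 * Create one vhost-vdpa net client. Data-path clients use
 * net_vhost_vdpa_info; the control virtqueue client probes CVQ isolation
 * first, uses net_vhost_vdpa_cvq_info and mmap()s the shadow command buffers.
 */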
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}