/*
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;
} VhostVDPAState;
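
/*
 * Feature bits the vhost-vdpa net backend is able to negotiate with the
 * device; the list is terminated by VHOST_INVALID_FEATURE_BIT.
 */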
const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_GUEST_ANNOUNCE,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}
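
/*
 * Check that the device features lie within the set of features SVQ can
 * shadow for a net device, setting errp otherwise.
 */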
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
    }

    return !invalid_dev_features;
}
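
/* Verify that the fd really belongs to a vhost-vdpa net device. */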
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}
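
/*
 * Initialize a vhost_net instance for one queue pair, attach it to the net
 * client and check that the backing device really is a net device.
 */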
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        return -1;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        vhost_net_cleanup(net);
        g_free(net);
        s->vhost_net = NULL;
        return -1;
    }
    return 0;
}
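
/*
 * Release the resources owned by a net client: the CVQ shadow buffers, the
 * IOVA tree (freed by the client owning the last queue), the vhost_net
 * state and the device fd.
 */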
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev = &s->vhost_net->dev;

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->status);
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;

    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf,
                           !write);
    if (unlikely(r < 0)) {
        vhost_iova_tree_remove(v->iova_tree, map);
        return r;
    }

    return 0;
}
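
/*
 * Map both CVQ shadow buffers when the client starts: the command (out)
 * buffer read-only for the device and the status (in) buffer writable.
 */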
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }
}
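
/*
 * Submit one control command through the shadow virtqueue and poll for its
 * completion. Returns the number of bytes the device wrote into the status
 * buffer, or a negative error code.
 */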
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when BQL is released.
     */
    return vhost_svq_poll(svq);
}
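
/*
 * Build a control command in the shadow out buffer (header followed by the
 * command-specific payload) and send it to the device.
 */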
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;

    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}
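
/*
 * Restore the NIC state through the shadowed control virtqueue: replay the
 * MAC address and the number of active queue pairs.
 */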
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
    if (unlikely(dev_written < 0)) {
        goto out;
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
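
/*
 * Create one net client. Datapath queue pairs use net_vhost_vdpa_info; the
 * control virtqueue client additionally allocates the page-aligned shadow
 * buffers and installs the shadow virtqueue ops.
 */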
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       VhostIOVATree *iova_tree)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;

    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.iova_tree = iova_tree;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                            vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->status = qemu_memalign(qemu_real_host_page_size(),
                                  vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_iova_range(int fd,
                                     struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);

    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
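
/*
 * Entry point for the vhost-vdpa netdev: open the device (or use the given
 * fd), query its features, IOVA range and maximum queue pairs, then create
 * one net client per data queue pair plus one for the CVQ when present.
 */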
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    g_autoptr(VhostIOVATree) iova_tree = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (opts->x_svq) {
        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
            goto err_svq;
        }

        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, iova_tree);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, iova_tree);
        if (!nc) {
            goto err;
        }
    }

    /* iova_tree ownership belongs to last NetClientState */
    g_steal_pointer(&iova_tree);
    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

err_svq:
    qemu_close(vdpa_device_fd);
    return -1;
}