/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

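/*
 * Illustrative note (not part of the upstream file): one -netdev vhost-vdpa
 * backend is modeled as several NetClients sharing the same device fd. A
 * device with N data queue pairs plus CVQ gets N clients backed by
 * net_vhost_vdpa_info (one per queue pair) and, if VIRTIO_NET_F_CTRL_VQ was
 * negotiated, one control client backed by net_vhost_vdpa_cvq_info. The
 * client of the first queue pair (index 0) owns the shared state, e.g. the
 * IOVA tree and the migration_state notifier; see
 * vhost_vdpa_net_first_nc_vdpa() below.
 */
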
/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

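/*
 * Hedged note (assumption about code outside this file): vdpa_feature_bits
 * is consumed by hw/net/vhost_net.c, whose vhost_net_get_feature_bits()
 * returns this array for vhost-vdpa clients so that only the bits listed
 * above are negotiated with, and acked to, the backend.
 */
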
/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

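/*
 * Illustrative sketch (virtio-net CVQ wire format, not code from this file):
 * the longest command counted by vhost_vdpa_net_cvq_cmd_len() is
 * VIRTIO_NET_CTRL_MAC_TABLE_SET, laid out in the out buffer as:
 *
 *   struct virtio_net_ctrl_hdr hdr;   // class = VIRTIO_NET_CTRL_MAC,
 *                                     // cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET
 *   struct virtio_net_ctrl_mac uni;   // le32 entries + unicast MACs
 *   struct virtio_net_ctrl_mac mul;   // le32 entries + multicast MACs
 *
 * with at most MAC_TABLE_ENTRIES addresses in total across both tables,
 * hence sizeof(hdr) + 2 * sizeof(struct virtio_net_ctrl_mac) +
 * MAC_TABLE_ENTRIES * ETH_ALEN above, rounded up to the host page size for
 * the mapping.
 */
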
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* Don't inspect device_id if the ioctl failed; it is uninitialized */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure SVQ or not */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

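/*
 * Hedged summary of the flow above: when migration enters setup, the data
 * vqs are restarted in shadow mode so SVQ can emulate VHOST_F_LOG_ALL and
 * report dirty pages; if migration fails, the notifier switches them back
 * to passthrough. A full stop/start cycle is used because, as the TODO
 * notes, today the backend must be suspended and reset to change the mode.
 */
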
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

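/*
 * Illustrative sketch (assumed variable names, not code from this file):
 * vrings are grouped by the parent driver, and each group can be bound to an
 * address space id. Isolating CVQ so QEMU can shadow it alone would look
 * like:
 *
 *   int64_t group = vhost_vdpa_get_vring_group(fd, cvq_index, &err);
 *   if (group >= 0) {
 *       vhost_vdpa_set_address_space_id(v, group, VHOST_VDPA_NET_CVQ_ASID);
 *   }
 *
 * vhost_vdpa_net_cvq_start() below does exactly this dance, keeping the
 * data vqs in VHOST_VDPA_GUEST_PA_ASID.
 */
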
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

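/*
 * Usage note (a sketch under the assumptions visible in this file): the CVQ
 * shadow buffers are mapped once per start, e.g.:
 *
 *   vhost_vdpa_cvq_map_buf(v, s->cvq_cmd_out_buffer,
 *                          vhost_vdpa_net_cvq_cmd_page_len(), false);
 *   vhost_vdpa_cvq_map_buf(v, s->status,
 *                          vhost_vdpa_net_cvq_cmd_page_len(), true);
 *
 * write == false maps the out buffer read-only for the device, write == true
 * maps the status buffer device-writable. vhost_vdpa_net_cvq_start() pairs
 * these with vhost_vdpa_cvq_unmap_buf() on its error path and in
 * vhost_vdpa_net_cvq_stop().
 */
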
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases SVQ will not be enabled. The
     * migration will be blocked as long as the vhost-vdpa backend does not
     * offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when BQL is released.
     */
    return vhost_svq_poll(svq, 1);
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                      &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                                  VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                                  data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
                                                  VIRTIO_NET_CTRL_VLAN_ADD,
                                                  &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (unlikely(*s->status != VIRTIO_NET_OK)) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

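/*
 * Worked example (illustrative): n->vlans is a bitmap of MAX_VLAN bits
 * stored in 32-bit words, so VLAN id 100 lives at word i = 100 >> 5 = 3,
 * bit j = 100 & 0x1f = 4, and is restored above as (i << 5) + j = 100 via
 * one VIRTIO_NET_CTRL_VLAN_ADD command per set bit.
 */
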
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        r = vhost_vdpa_net_load_mac(s, n);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate that QEMU polls a used buffer from the vdpa device for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

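/*
 * Illustrative layout of the fake command built above (only the entry
 * counts matter; the MAC address bytes themselves are never read by the
 * device model):
 *
 *   hdr { class = VIRTIO_NET_CTRL_MAC, cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET }
 *   uni { entries = MAC_TABLE_ENTRIES + 1 } + (MAC_TABLE_ENTRIES + 1) * ETH_ALEN bytes
 *   mul { entries = MAC_TABLE_ENTRIES + 1 } + (MAC_TABLE_ENTRIES + 1) * ETH_ALEN bytes
 *
 * Handling this in virtio_net_handle_mac() overflows both tables, so the
 * device model marks uni_overflow and multi_overflow and receives all
 * packets, matching the now-promiscuous vdpa device.
 */
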
/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by QEMU, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

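/*
 * Hedged recap of the control path wired up above: a guest CVQ kick lands in
 * vhost_vdpa_net_handle_ctrl_avail(), which copies the command into
 * s->cvq_cmd_out_buffer, forwards it with vhost_vdpa_net_cvq_add() (or the
 * excessive-MAC special case), reads the device ack from s->status, and only
 * then replays the command into QEMU's device model through
 * virtio_net_handle_ctrl_iov() so both stay in sync.
 */
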
/**
 * Probe if CVQ is isolated
 *
 * @device_fd The vdpa device fd
 * @features Features offered by the device.
 * @cvq_index The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
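
/*
 * Usage sketch (assumed device path /dev/vhost-vdpa-0): the entry point
 * above backs a command line such as
 *
 *   qemu-system-x86_64 \
 *       -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *       -device virtio-net-pci,netdev=vdpa0
 *
 * Adding x-svq=on to the -netdev options forces shadow virtqueues from the
 * start (VhostVDPAState.always_svq).
 */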