/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

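/*
 * Editor's note (derived from the code below): when the device can isolate
 * the CVQ group in its own ASID (see vhost_vdpa_net_cvq_start()), the CVQ
 * shadow buffers are mapped through this address space id, while the data
 * virtqueues keep using VHOST_VDPA_GUEST_PA_ASID.
 */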
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

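/*
 * Editor's note, a worked example of the sizes above (assuming the usual
 * MAC_TABLE_ENTRIES of 64 and ETH_ALEN of 6): the largest command,
 * MAC_TABLE_SET, needs 2 bytes of ctrl header + 2 * 4 bytes for the two
 * virtio_net_ctrl_mac entry counts + 64 * 6 bytes of MAC addresses
 * = 394 bytes, so a single host page comfortably holds each shadow buffer.
 */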
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
          n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

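/*
 * Editor's note on the has_cvq check above: virtio-net data queues come in
 * RX/TX pairs, so an odd vq_index_end means the device also exposes a
 * control virtqueue. In that case the data rings are not enabled here;
 * vhost_vdpa_net_cvq_load() below enables every ring after the control
 * state has been restored.
 */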
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

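/*
 * Editor's note: net_vhost_vdpa_info above backs the data queue-pair
 * clients; the control virtqueue client uses net_vhost_vdpa_cvq_info,
 * defined further below, with its own start/load/stop hooks.
 */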
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    v->shadow_vqs_enabled = v->shared->shadow_data;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer
     * _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If another vhost_vdpa already has an iova_tree, reuse it for
     * simplicity, whether CVQ shares ASID with the guest or not, because:
     * - The memory listener needs access to guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
     *   worry about collisions between them. Guest's translations are still
     *   validated with virtio virtqueue_pop so there is no risk for the guest
     *   to access memory that it shouldn't.
     *
     * Allocating an iova tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg, size_t out_num,
                                      const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ pulls by itself when BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

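/*
 * Editor's sketch of the shadow buffers while control commands are batched
 * (offsets depend on each command's length):
 *
 *   cvq_cmd_out_buffer: [hdr0|data0][hdr1|data1]...  <- out_cursor
 *   s->status:          [ack0][ack1]...              <- in_cursor
 *
 * Both cursors start at the beginning of their page-sized buffer and
 * advance as vhost_vdpa_net_load_cmd() packs commands; they are reset by
 * the helper above after every flush.
 */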
/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

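/*
 * Editor's note on the flush condition above: each batched command consumes
 * one out descriptor and one in descriptor in the shadow virtqueue, which
 * is why the helper flushes whenever fewer than two slots remain or the out
 * shadow buffer cannot hold another command.
 */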
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in indirection_table
         * is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure match the layout of the
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * virtio_net_handle_rss() currently does not restore the hash key length
     * parsed from the guest's CVQ command into n->rss_data and uses the
     * maximum key length elsewhere, so we also employ the maximum key
     * length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * non-multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all pending device's used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to the
 * vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device's used buffer for the
     * VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}

2df4dd31 EP |
1434 | /** |
1435 | * Validate and copy control virtqueue commands. | |
1436 | * | |
1437 | * Following QEMU guidelines, we offer a copy of the buffers to the device to | |
1438 | * prevent TOCTOU bugs. | |
bd907ae4 EP |
1439 | */ |
1440 | static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, | |
1441 | VirtQueueElement *elem, | |
1442 | void *opaque) | |
1443 | { | |
2df4dd31 | 1444 | VhostVDPAState *s = opaque; |
be4278b6 | 1445 | size_t in_len; |
45c41018 | 1446 | const struct virtio_net_ctrl_hdr *ctrl; |
bd907ae4 | 1447 | virtio_net_ctrl_ack status = VIRTIO_NET_ERR; |
7a7f87e9 EP |
1448 | /* Out buffer sent to both the vdpa device and the device model */ |
1449 | struct iovec out = { | |
1450 | .iov_base = s->cvq_cmd_out_buffer, | |
1451 | }; | |
2df4dd31 | 1452 | /* in buffer used for device model */ |
0e6bff0d | 1453 | const struct iovec model_in = { |
2df4dd31 EP |
1454 | .iov_base = &status, |
1455 | .iov_len = sizeof(status), | |
1456 | }; | |
0e6bff0d HJ |
1457 | /* in buffer used for vdpa device */ |
1458 | const struct iovec vdpa_in = { | |
1459 | .iov_base = s->status, | |
1460 | .iov_len = sizeof(*s->status), | |
1461 | }; | |
be4278b6 | 1462 | ssize_t dev_written = -EINVAL; |
2df4dd31 | 1463 | |
7a7f87e9 EP |
1464 | out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, |
1465 | s->cvq_cmd_out_buffer, | |
fee364e4 | 1466 | vhost_vdpa_net_cvq_cmd_page_len()); |
45c41018 HJ |
1467 | |
1468 | ctrl = s->cvq_cmd_out_buffer; | |
1469 | if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) { | |
3f9a3eeb EP |
1470 | /* |
1471 | * The guest announce capability is emulated by QEMU, so don't forward |
1472 | * it to the device. |
1473 | */ | |
1474 | dev_written = sizeof(status); | |
1475 | *s->status = VIRTIO_NET_OK; | |
fee364e4 HJ |
1476 | } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC && |
1477 | ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET && | |
1478 | iov_size(elem->out_sg, elem->out_num) > out.iov_len)) { | |
1479 | /* | |
1480 | * Due to the size limitation of the out buffer sent to the vdpa device, | |
1481 | * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), an excessive |
1482 | * number of MAC addresses set by the driver for the filter table can cause |
1483 | * truncation of the CVQ command in QEMU. As a result, the vdpa device | |
1484 | * rejects the flawed CVQ command. | |
1485 | * | |
1486 | * Therefore, QEMU must handle this situation instead of sending | |
0a19d879 | 1487 | * the CVQ command directly. |
fee364e4 HJ |
1488 | */ |
1489 | dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem, | |
327dedb8 | 1490 | &out, &vdpa_in); |
fee364e4 HJ |
1491 | if (unlikely(dev_written < 0)) { |
1492 | goto out; | |
1493 | } | |
3f9a3eeb | 1494 | } else { |
a864a321 HJ |
1495 | ssize_t r; |
1496 | r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1); | |
1497 | if (unlikely(r < 0)) { | |
1498 | dev_written = r; | |
3f9a3eeb EP |
1499 | goto out; |
1500 | } | |
a864a321 HJ |
1501 | |
1502 | /* | |
1503 | * We can poll here since we've held the BQL from the time |
1504 | * we sent the descriptor. | |
1505 | */ | |
1506 | dev_written = vhost_vdpa_net_svq_poll(s, 1); | |
bd907ae4 EP |
1507 | } |
1508 | ||
bd907ae4 EP |
1509 | if (unlikely(dev_written < sizeof(status))) { |
1510 | error_report("Insufficient written data (%zd)", dev_written); |
2df4dd31 EP |
1511 | goto out; |
1512 | } | |
1513 | ||
17fb889f | 1514 | if (*s->status != VIRTIO_NET_OK) { |
d45243bc | 1515 | goto out; |
2df4dd31 EP |
1516 | } |
1517 | ||
1518 | status = VIRTIO_NET_ERR; | |
0e6bff0d | 1519 | virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1); |
2df4dd31 EP |
1520 | if (status != VIRTIO_NET_OK) { |
1521 | error_report("Bad CVQ processing in model"); | |
bd907ae4 EP |
1522 | } |
1523 | ||
1524 | out: | |
1525 | in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, | |
1526 | sizeof(status)); | |
1527 | if (unlikely(in_len < sizeof(status))) { | |
1528 | error_report("Bad device CVQ written length"); | |
1529 | } | |
1530 | vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); | |
031b1aba HJ |
1531 | /* |
1532 | * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when | |
1533 | * the function successfully forwards the CVQ command, indicated | |
1534 | * by a non-negative value of `dev_written`. Otherwise, it still | |
1535 | * belongs to SVQ. | |
1536 | * This function should only free `elem` when it owns it. |
1537 | */ | |
1538 | if (dev_written >= 0) { | |
1539 | g_free(elem); | |
1540 | } | |
be4278b6 | 1541 | return dev_written < 0 ? dev_written : 0; |
bd907ae4 EP |
1542 | } |
1543 | ||
1544 | static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { | |
1545 | .avail_handler = vhost_vdpa_net_handle_ctrl_avail, | |
1546 | }; | |
1547 | ||
152128d6 EP |
1548 | /** |
1549 | * Probe if CVQ is isolated | |
1550 | * | |
1551 | * @device_fd The vdpa device fd | |
1552 | * @features Features offered by the device. | |
1553 | * @cvq_index The control vq pair index | |
1554 | * | |
1555 | * Returns <0 in case of failure, 0 if false and 1 if true. | |
1556 | */ | |
1557 | static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features, | |
1558 | int cvq_index, Error **errp) | |
1559 | { | |
1560 | uint64_t backend_features; | |
1561 | int64_t cvq_group; | |
1562 | uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE | | |
845ec38a | 1563 | VIRTIO_CONFIG_S_DRIVER; |
152128d6 EP |
1564 | int r; |
1565 | ||
1566 | ERRP_GUARD(); | |
1567 | ||
1568 | r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features); | |
1569 | if (unlikely(r < 0)) { | |
1570 | error_setg_errno(errp, errno, "Cannot get vdpa backend_features"); | |
1571 | return r; | |
1572 | } | |
1573 | ||
1574 | if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) { | |
1575 | return 0; | |
1576 | } | |
1577 | ||
845ec38a EP |
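/*
 * Follow the normal virtio initialization order (ACKNOWLEDGE | DRIVER,
 * then features, then FEATURES_OK) so the vring group queries below are
 * issued against a device that has accepted the offered features.
 */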
1578 | r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); |
1579 | if (unlikely(r)) { | |
1580 | error_setg_errno(errp, -r, "Cannot set device status"); | |
1581 | goto out; | |
1582 | } | |
1583 | ||
152128d6 EP |
1584 | r = ioctl(device_fd, VHOST_SET_FEATURES, &features); |
1585 | if (unlikely(r)) { | |
845ec38a | 1586 | error_setg_errno(errp, -r, "Cannot set features"); |
f1085882 | 1587 | goto out; |
152128d6 EP |
1588 | } |
1589 | ||
845ec38a | 1590 | status |= VIRTIO_CONFIG_S_FEATURES_OK; |
152128d6 EP |
1591 | r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); |
1592 | if (unlikely(r)) { | |
845ec38a | 1593 | error_setg_errno(errp, -r, "Cannot set device status"); |
152128d6 EP |
1594 | goto out; |
1595 | } | |
1596 | ||
1597 | cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp); | |
1598 | if (unlikely(cvq_group < 0)) { | |
1599 | if (cvq_group != -ENOTSUP) { | |
1600 | r = cvq_group; | |
1601 | goto out; | |
1602 | } | |
1603 | ||
1604 | /* | |
1605 | * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend |
1606 | * supports ASID even if the parent driver does not. The CVQ cannot be |
1607 | * isolated in this case. | |
1608 | */ | |
1609 | error_free(*errp); | |
1610 | *errp = NULL; | |
1611 | r = 0; | |
1612 | goto out; | |
1613 | } | |
1614 | ||
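/*
 * CVQ is only isolated if no data virtqueue shares its vring group;
 * otherwise it cannot be placed in its own ASID.
 */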
1615 | for (int i = 0; i < cvq_index; ++i) { | |
1616 | int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp); | |
1617 | if (unlikely(group < 0)) { | |
1618 | r = group; | |
1619 | goto out; | |
1620 | } | |
1621 | ||
1622 | if (group == (int64_t)cvq_group) { | |
1623 | r = 0; | |
1624 | goto out; | |
1625 | } | |
1626 | } | |
1627 | ||
1628 | r = 1; | |
1629 | ||
1630 | out: | |
1631 | status = 0; | |
1632 | ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); | |
1633 | return r; | |
1634 | } | |
1635 | ||
654790b6 | 1636 | static NetClientState *net_vhost_vdpa_init(NetClientState *peer, |
a585fad2 EP |
1637 | const char *device, |
1638 | const char *name, | |
1639 | int vdpa_device_fd, | |
1640 | int queue_pair_index, | |
1641 | int nvqs, | |
1642 | bool is_datapath, | |
1643 | bool svq, | |
5c1ebd4c | 1644 | struct vhost_vdpa_iova_range iova_range, |
152128d6 | 1645 | uint64_t features, |
8c5e9809 | 1646 | VhostVDPAShared *shared, |
152128d6 | 1647 | Error **errp) |
1e0a84ea CL |
1648 | { |
1649 | NetClientState *nc = NULL; | |
1650 | VhostVDPAState *s; | |
1e0a84ea CL |
1651 | int ret = 0; |
1652 | assert(name); | |
e77db790 | 1653 | int cvq_isolated = 0; |
152128d6 | 1654 | |
40237840 JW |
1655 | if (is_datapath) { |
1656 | nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, | |
1657 | name); | |
1658 | } else { | |
152128d6 EP |
1659 | cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features, |
1660 | queue_pair_index * 2, | |
1661 | errp); | |
1662 | if (unlikely(cvq_isolated < 0)) { | |
1663 | return NULL; | |
1664 | } | |
1665 | ||
f8972b56 | 1666 | nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer, |
40237840 JW |
1667 | device, name); |
1668 | } | |
53b85d95 | 1669 | qemu_set_info_str(nc, TYPE_VHOST_VDPA); |
1e0a84ea | 1670 | s = DO_UPCAST(VhostVDPAState, nc, nc); |
7327813d | 1671 | |
40237840 | 1672 | s->vhost_vdpa.index = queue_pair_index; |
7f211a28 | 1673 | s->always_svq = svq; |
d9cda213 | 1674 | s->migration_state.notify = NULL; |
1576dbb5 | 1675 | s->vhost_vdpa.shadow_vqs_enabled = svq; |
5c1ebd4c EP |
1676 | if (queue_pair_index == 0) { |
1677 | vhost_vdpa_net_valid_svq_features(features, | |
1678 | &s->vhost_vdpa.migration_blocker); | |
8c5e9809 | 1679 | s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1); |
f12b2498 | 1680 | s->vhost_vdpa.shared->device_fd = vdpa_device_fd; |
ae25ff41 | 1681 | s->vhost_vdpa.shared->iova_range = iova_range; |
a6e823d4 | 1682 | s->vhost_vdpa.shared->shadow_data = svq; |
5c1ebd4c | 1683 | } else if (!is_datapath) { |
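/*
 * Page-sized bounce buffers for the shadow CVQ: commands are copied here
 * before being exposed to the vdpa device, so the guest cannot modify them
 * after validation (see vhost_vdpa_net_handle_ctrl_avail()).
 */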
babf8b87 EP |
1684 | s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), |
1685 | PROT_READ | PROT_WRITE, | |
1686 | MAP_SHARED | MAP_ANONYMOUS, -1, 0); | |
1687 | s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), | |
1688 | PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, | |
1689 | -1, 0); | |
2df4dd31 | 1690 | |
bd907ae4 EP |
1691 | s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; |
1692 | s->vhost_vdpa.shadow_vq_ops_opaque = s; | |
152128d6 | 1693 | s->cvq_isolated = cvq_isolated; |
bd907ae4 | 1694 | } |
8c5e9809 EP |
1695 | if (queue_pair_index != 0) { |
1696 | s->vhost_vdpa.shared = shared; | |
1697 | } | |
1698 | ||
40237840 | 1699 | ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); |
74af5eec | 1700 | if (ret) { |
74af5eec | 1701 | qemu_del_net_client(nc); |
654790b6 | 1702 | return NULL; |
74af5eec | 1703 | } |
8c5e9809 | 1704 | |
654790b6 | 1705 | return nc; |
1e0a84ea CL |
1706 | } |
1707 | ||
8170ab3f EP |
1708 | static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) |
1709 | { | |
1710 | int ret = ioctl(fd, VHOST_GET_FEATURES, features); | |
1711 | if (unlikely(ret < 0)) { | |
1712 | error_setg_errno(errp, errno, | |
1713 | "Fail to query features from vhost-vDPA device"); | |
1714 | } | |
1715 | return ret; | |
1716 | } | |
1717 | ||
1718 | static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, | |
1719 | int *has_cvq, Error **errp) | |
40237840 JW |
1720 | { |
1721 | unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); | |
cd523a41 | 1722 | g_autofree struct vhost_vdpa_config *config = NULL; |
40237840 | 1723 | __virtio16 *max_queue_pairs; |
40237840 JW |
1724 | int ret; |
1725 | ||
40237840 JW |
1726 | if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { |
1727 | *has_cvq = 1; | |
1728 | } else { | |
1729 | *has_cvq = 0; | |
1730 | } | |
1731 | ||
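/*
 * With VIRTIO_NET_F_MQ the device reports max_virtqueue_pairs in its
 * config space; without it, the device provides a single queue pair.
 */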
1732 | if (features & (1 << VIRTIO_NET_F_MQ)) { | |
1733 | config = g_malloc0(config_size + sizeof(*max_queue_pairs)); | |
1734 | config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); | |
1735 | config->len = sizeof(*max_queue_pairs); | |
1736 | ||
1737 | ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config); | |
1738 | if (ret) { | |
1739 | error_setg(errp, "Failed to get config from vhost-vDPA device"); |
1740 | return -ret; | |
1741 | } | |
1742 | ||
1743 | max_queue_pairs = (__virtio16 *)&config->buf; | |
1744 | ||
1745 | return lduw_le_p(max_queue_pairs); | |
1746 | } | |
1747 | ||
1748 | return 1; | |
1749 | } | |
1750 | ||
1e0a84ea CL |
1751 | int net_init_vhost_vdpa(const Netdev *netdev, const char *name, |
1752 | NetClientState *peer, Error **errp) | |
1753 | { | |
1754 | const NetdevVhostVDPAOptions *opts; | |
8170ab3f | 1755 | uint64_t features; |
654790b6 | 1756 | int vdpa_device_fd; |
eb3cb751 | 1757 | g_autofree NetClientState **ncs = NULL; |
a585fad2 | 1758 | struct vhost_vdpa_iova_range iova_range; |
eb3cb751 | 1759 | NetClientState *nc; |
aed5da45 | 1760 | int queue_pairs, r, i = 0, has_cvq = 0; |
1e0a84ea CL |
1761 | |
1762 | assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); | |
1763 | opts = &netdev->u.vhost_vdpa; | |
7480874a | 1764 | if (!opts->vhostdev && !opts->vhostfd) { |
8801ccd0 SWL |
1765 | error_setg(errp, |
1766 | "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); | |
c8295404 EP |
1767 | return -1; |
1768 | } | |
7327813d | 1769 | |
7480874a | 1770 | if (opts->vhostdev && opts->vhostfd) { |
8801ccd0 SWL |
1771 | error_setg(errp, |
1772 | "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); | |
1773 | return -1; | |
1774 | } | |
1775 | ||
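/*
 * Open the vdpa device either from a character device path (vhostdev=)
 * or from a file descriptor passed in through the monitor (vhostfd=).
 */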
7480874a | 1776 | if (opts->vhostdev) { |
8801ccd0 SWL |
1777 | vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); |
1778 | if (vdpa_device_fd == -1) { | |
1779 | return -errno; | |
1780 | } | |
5107fd3e PM |
1781 | } else { |
1782 | /* has_vhostfd */ | |
8801ccd0 SWL |
1783 | vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); |
1784 | if (vdpa_device_fd == -1) { | |
1785 | error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); | |
1786 | return -1; | |
1787 | } | |
7327813d JW |
1788 | } |
1789 | ||
8170ab3f EP |
1790 | r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); |
1791 | if (unlikely(r < 0)) { | |
aed5da45 | 1792 | goto err; |
8170ab3f EP |
1793 | } |
1794 | ||
1795 | queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, | |
40237840 JW |
1796 | &has_cvq, errp); |
1797 | if (queue_pairs < 0) { | |
7327813d | 1798 | qemu_close(vdpa_device_fd); |
40237840 JW |
1799 | return queue_pairs; |
1800 | } | |
1801 | ||
bf7a2ad8 LM |
1802 | r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); |
1803 | if (unlikely(r < 0)) { | |
1804 | error_setg(errp, "vhost-vdpa: get iova range failed: %s", | |
1805 | strerror(-r)); | |
1806 | goto err; | |
1807 | } | |
1808 | ||
00ef422e EP |
1809 | if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) { |
1810 | goto err; | |
1576dbb5 EP |
1811 | } |
1812 | ||
40237840 JW |
1813 | ncs = g_malloc0(sizeof(*ncs) * queue_pairs); |
1814 | ||
1815 | for (i = 0; i < queue_pairs; i++) { | |
8c5e9809 EP |
1816 | VhostVDPAShared *shared = NULL; |
1817 | ||
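/*
 * Queue pair 0 allocates the VhostVDPAShared state; the remaining queue
 * pairs (and the CVQ client below) reuse it.
 */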
1818 | if (i) { | |
1819 | shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared; | |
1820 | } | |
40237840 | 1821 | ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, |
1576dbb5 | 1822 | vdpa_device_fd, i, 2, true, opts->x_svq, |
8c5e9809 | 1823 | iova_range, features, shared, errp); |
40237840 JW |
1824 | if (!ncs[i]) |
1825 | goto err; | |
7327813d JW |
1826 | } |
1827 | ||
40237840 | 1828 | if (has_cvq) { |
8c5e9809 EP |
1829 | VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]); |
1830 | VhostVDPAShared *shared = s0->vhost_vdpa.shared; | |
1831 | ||
40237840 | 1832 | nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, |
1576dbb5 | 1833 | vdpa_device_fd, i, 1, false, |
8c5e9809 EP |
1834 | opts->x_svq, iova_range, features, shared, |
1835 | errp); | |
40237840 JW |
1836 | if (!nc) |
1837 | goto err; | |
1838 | } | |
1839 | ||
654790b6 | 1840 | return 0; |
40237840 JW |
1841 | |
1842 | err: | |
1843 | if (i) { | |
9bd05507 SWL |
1844 | for (i--; i >= 0; i--) { |
1845 | qemu_del_net_client(ncs[i]); | |
1846 | } | |
40237840 | 1847 | } |
1576dbb5 | 1848 | |
40237840 | 1849 | qemu_close(vdpa_device_fd); |
40237840 JW |
1850 | |
1851 | return -1; | |
1e0a84ea | 1852 | } |