/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "standard-headers/linux/vhost_types.h"
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

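/* Walk the backend's dirty log and mark the corresponding pages dirty in
 * QEMU's bitmap.  Each set bit in a log chunk stands for one
 * VHOST_LOG_PAGE-sized page; [mfirst, mlast] bounds the section and
 * [rfirst, rlast] the region or ring being synced.
 */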
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

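/* Sync the dirty log for the part of @section that falls inside
 * [first, last], checking it against every memory region and every
 * virtqueue's used ring.
 */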
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

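/* Compute the log size needed to cover the highest guest physical address
 * in use, across both the memory regions and the virtqueues' used rings.
 */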
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

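/* Return a reference-counted log of the given size, reusing the current
 * global log (shared or private) when its size already matches.
 */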
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

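/* Drop the device's reference to its log; on the last reference,
 * optionally sync the covered range and free the log's storage.
 */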
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

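/* Map/unmap helpers.  With an IOMMU the backend deals in IOVAs and does
 * its own translation, so the address is passed through unchanged;
 * otherwise the guest physical address is mapped into QEMU's own
 * address space.
 */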
static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

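/* Check that each started virtqueue's descriptor table, avail ring and
 * used ring are still mapped where the backend expects them, relative to
 * the updated region [reg_gpa, reg_gpa + reg_size).
 */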
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

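/* Decide whether a memory section should be exposed to the backend:
 * plain RAM only, no dirty tracking other than migration, and subject
 * to any backend-specific section filter.
 */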
static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
        result &=
            dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
    }

    trace_vhost_section(section->mr->name, result);
    return result;
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

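/* Called once the listener has seen all sections of the new FlatView;
 * turn the collected tmp_sections into a vhost_memory region table and
 * push it to the backend, growing the dirty log before the table update
 * and shrinking it only afterwards so that no logging is lost.
 */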
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                                           n_old_sections *
                                           sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                   dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size = int128_get64(mrs->size);
        cur_vmr->userspace_addr =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;
    size_t mrs_page = qemu_ram_pagesize(mrs_rb);

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    /* Round the section to its page size */
    /* First align the start down to a page boundary */
    uint64_t alignage = mrs_host & (mrs_page - 1);
    if (alignage) {
        mrs_host -= alignage;
        mrs_size += alignage;
        mrs_gpa -= alignage;
    }
    /* Now align the size up to a page boundary */
    alignage = mrs_size & (mrs_page - 1);
    if (alignage) {
        mrs_size += mrs_page - alignage;
    }
    trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
                                           mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they should
             * match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section rounded to %"PRIx64
                             " prior to previous %"PRIx64,
                             __func__, mrs_gpa, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                                             mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(dev, section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

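/* Toggle dirty logging on every virtqueue of the device, rolling back to
 * the current logging state if any step fails.
 */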
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

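/* Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space and push the resulting mapping
 * back to the backend as a device IOTLB update.
 */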
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    trace_vhost_iotlb_miss(dev, 1);

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write,
                                          MEMTXATTRS_UNSPECIFIED);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 3);
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 4);
            error_report("Failed to update device iotlb");
            goto out;
        }
    }

    trace_vhost_iotlb_miss(dev, 2);

out:
    rcu_read_unlock();

    return ret;
}

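/* Start one virtqueue in the backend: program the ring size, base index
 * and (for legacy cross-endian setups) endianness, map the descriptor,
 * avail and used rings, and wire up the kick and call notifiers.
 */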
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

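/* Stop one virtqueue: read the last avail index back from the backend,
 * reset vring endianness for legacy cross-endian setups, and unmap the
 * rings.
 */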
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

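/* One-time virtqueue setup: create the masked notifier and point the
 * backend's call eventfd at it.
 */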
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

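/* Initialise a vhost device: set up the backend, negotiate features,
 * initialise the virtqueues, install the memory listeners, and register
 * a migration blocker when dirty logging is not available.
 */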
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

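/* Release the resources acquired by vhost_dev_init(); also safe to call
 * on a partially initialised device after a failed init.
 */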
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in the kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

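/* Feature negotiation helpers: vhost_get_features() masks out feature
 * bits the backend does not support, vhost_ack_features() records the
 * bits the guest acked.
 */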
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}