/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "standard-headers/linux/vhost_types.h"
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "sysemu/dma.h"
#include "sysemu/tcg.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

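/*
 * Dirty-log layout, for reference (a sketch assuming QEMU's usual
 * definitions of VHOST_LOG_PAGE = 0x1000 and a 64-bit vhost_log_chunk_t,
 * so one chunk covers 64 * 4KiB = 256KiB of guest memory):
 *
 *     chunk = addr / VHOST_LOG_CHUNK;
 *     bit   = (addr % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE;
 *
 * e.g. a write to guest physical address 0x42000 sets bit 2 of chunk 1.
 * vhost_dev_sync_region() walks the chunks that intersect both the
 * section range [mfirst, mlast] and the region range [rfirst, rlast]
 * and forwards every set bit into QEMU's own dirty bitmap.
 */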
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = qatomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (!vq->used_phys && !vq->used_size) {
            continue;
        }

        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

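/*
 * The log must cover the highest guest physical address the backend can
 * dirty: every region in dev->mem plus each virtqueue's used ring, which
 * vhost writes to directly.  The result is a size in vhost_log_chunk_t
 * units; with the 256KiB chunks assumed above, a region ending at the
 * 4GiB boundary needs 0xFFFFFFFF / 0x40000 + 1 = 16384 chunks.
 */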
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (!vq->used_phys && !vq->used_size) {
            continue;
        }

        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

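/*
 * Logs are cached in the vhost_log/vhost_log_shm globals and reference
 * counted, so multiple vhost devices logging at the same time share one
 * allocation as long as they agree on its size; a different size causes
 * a fresh log to be allocated and cached instead.
 */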
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports
     * an incremental memory mapping API via the IOTLB API. For platforms
     * that do not have an IOMMU, there's no need to enable this feature,
     * which may cause unnecessary IOTLB miss/update transactions.
     */
    return vdev->dma_as != &address_space_memory &&
           virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, bool is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

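/*
 * Ring-part verification below distinguishes three outcomes: 0 when the
 * part lies outside the region or is still mapped at the same host
 * address, -ENOMEM when the part is only partially covered by the
 * region, and -EBUSY when the guest physical range still matches but
 * the backing host memory moved (the MemoryRegion was replaced).
 */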
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that the ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

/*
 * vhost_section: identify sections needed for vhost access
 *
 * We only care about RAM sections here (where virtqueue and guest
 * internals accessed by virtio might live). If we find one we still
 * allow the backend to potentially filter it out of our list.
 */
static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
        uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
        uint8_t handled_dirty;

        /*
         * Kernel-based vhost doesn't handle any block which is doing
         * dirty-tracking other than migration for which it has
         * specific logging support. However for TCG the kernel never
         * gets involved anyway so we can also ignore its
         * self-modifying code detection flags. However a vhost-user
         * client could still confuse a TCG guest if it re-writes
         * executable memory that has already been translated.
         */
        handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
            (1 << DIRTY_MEMORY_CODE);

        if (dirty_mask & ~handled_dirty) {
            trace_vhost_reject_section(mr->name, 1);
            return false;
        }

        if (dev->vhost_ops->vhost_backend_mem_section_filter &&
            !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
            trace_vhost_reject_section(mr->name, 2);
            return false;
        }

        trace_vhost_section(mr->name);
        return true;
    } else {
        trace_vhost_reject_section(mr->name, 3);
        return false;
    }
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

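/*
 * MemoryListener contract: begin -> region_add/region_nop (in address
 * order) -> commit.  vhost_begin() resets the temporary section list,
 * the add/nop callbacks fill it, and vhost_commit() compares it with
 * the previous list so that a new memory table is only sent to the
 * backend when something actually changed.
 */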
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        for (int i = 0; i < n_old_sections; i++) {
            if (!MemoryRegionSection_eq(&old_sections[i],
                                        &dev->mem_sections[i])) {
                changed = true;
                break;
            }
        }
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

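/*
 * Worked example of the vhost-user page rounding done below (values are
 * illustrative only): with 2MiB backing pages, a section whose host
 * mapping starts 0x100000 into a page and spans 0x300000 bytes is
 * extended downwards by alignage = 0x100000 to a 0x400000-byte range;
 * that is already a whole number of pages, so the final up-rounding
 * adds nothing.  Rounding can make neighbouring sections overlap, which
 * the merge logic further down has to cope with.
 */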
/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
        /* Round the section to its page size */
        /* First align the start down to a page boundary */
        size_t mrs_page = qemu_ram_pagesize(mrs_rb);
        uint64_t alignage = mrs_host & (mrs_page - 1);
        if (alignage) {
            mrs_host -= alignage;
            mrs_size += alignage;
            mrs_gpa  -= alignage;
        }
        /* Now align the size up to a page boundary */
        alignage = mrs_size & (mrs_page - 1);
        if (alignage) {
            mrs_size += mrs_page - alignage;
        }
        trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
                                               mrs_size, mrs_host);
    }

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they should
             * match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section '%s' rounded to %"PRIx64
                             " prior to previous '%s' %"PRIx64,
                             __func__, section->mr->name, mrs_gpa,
                             prev_sec->mr->name, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                               mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(dev, section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Fail to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n,
                                          &error_fatal);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

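/*
 * Note the split below: backends that provide vhost_vq_get_addr (added
 * for vhost-vdpa, which wants guest physical addresses) fill in the
 * vhost_vring_addr themselves, while the default path passes the host
 * virtual addresses obtained from vhost_memory_map().  log_guest_addr
 * is always the used ring's GPA, since that is what the backend logs
 * writes against.
 */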
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr;
    int r;
    memset(&addr, 0, sizeof(struct vhost_vring_addr));

    if (dev->vhost_ops->vhost_vq_get_addr) {
        r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
            return -errno;
        }
    } else {
        addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
        addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
        addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
    }
    addr.index = idx;
    addr.log_guest_addr = vq->used_phys;
    addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
    r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    if (!vhost_dev_has_iommu(dev)) {
        features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
    }
    if (dev->vhost_ops->vhost_force_iommu) {
        if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
            features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
        }
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
        goto out;
    }
    if (dev->vhost_ops->vhost_set_backend_cap) {
        r = dev->vhost_ops->vhost_set_backend_cap(dev);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_backend_cap failed");
            goto out;
        }
    }

out:
    return r < 0 ? -errno : 0;
}

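/*
 * Toggling logging is a two-step transaction: renegotiate the
 * VHOST_F_LOG_ALL feature, then reprogram every started virtqueue's
 * addresses with or without VHOST_VRING_F_LOG.  On failure the err_vq
 * path rolls the already-updated queues back to the previous
 * dev->log_enabled state.
 */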
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    hwaddr addr;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        if (!addr) {
            /*
             * The queue might not be ready for start. If this
             * is the case there is no reason to continue the process.
             * Similar logic is used by the vhost_virtqueue_start()
             * routine.
             */
            continue;
        }
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

869 | ||
705f7f2f | 870 | static int vhost_migration_log(MemoryListener *listener, bool enable) |
d5970055 | 871 | { |
04097f7c AK |
872 | struct vhost_dev *dev = container_of(listener, struct vhost_dev, |
873 | memory_listener); | |
d5970055 | 874 | int r; |
705f7f2f | 875 | if (enable == dev->log_enabled) { |
d5970055 MT |
876 | return 0; |
877 | } | |
878 | if (!dev->started) { | |
879 | dev->log_enabled = enable; | |
880 | return 0; | |
881 | } | |
f5b22d06 DS |
882 | |
883 | r = 0; | |
d5970055 MT |
884 | if (!enable) { |
885 | r = vhost_dev_set_log(dev, false); | |
886 | if (r < 0) { | |
f5b22d06 | 887 | goto check_dev_state; |
d5970055 | 888 | } |
309750fa | 889 | vhost_log_put(dev, false); |
d5970055 MT |
890 | } else { |
891 | vhost_dev_log_resize(dev, vhost_get_log_size(dev)); | |
892 | r = vhost_dev_set_log(dev, true); | |
893 | if (r < 0) { | |
f5b22d06 | 894 | goto check_dev_state; |
d5970055 MT |
895 | } |
896 | } | |
f5b22d06 DS |
897 | |
898 | check_dev_state: | |
d5970055 | 899 | dev->log_enabled = enable; |
f5b22d06 DS |
900 | /* |
901 | * vhost-user-* devices could change their state during log | |
902 | * initialization due to disconnect. So check dev state after | |
903 | * vhost communication. | |
904 | */ | |
905 | if (!dev->started) { | |
906 | /* | |
907 | * Since device is in the stopped state, it is okay for | |
908 | * migration. Return success. | |
909 | */ | |
910 | r = 0; | |
911 | } | |
912 | if (r) { | |
913 | /* An error is occured. */ | |
914 | dev->log_enabled = false; | |
915 | } | |
916 | ||
917 | return r; | |
d5970055 MT |
918 | } |
919 | ||
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

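/*
 * IOTLB miss path: the backend reports an IOVA it cannot translate;
 * QEMU resolves it through the device's DMA address space, clamps the
 * result to the containing vhost memory region, and pushes the
 * IOVA -> HVA mapping back via vhost_backend_update_device_iotlb().
 * The trace argument numbers the paths taken: 1 = entry, 2 = success,
 * 3 = lookup failure, 4 = update failure.
 */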
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    RCU_READ_LOCK_GUARD();

    trace_vhost_iotlb_miss(dev, 1);

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write,
                                          MEMTXATTRS_UNSPECIFIED);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 3);
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 4);
            error_report("Fail to update device iotlb");
            goto out;
        }
    }

    trace_vhost_iotlb_miss(dev, 2);

out:
    return ret;
}

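/*
 * Virtqueue start sequence, mirroring the order of the underlying
 * backend calls: ring size and last-avail index first, then (for
 * cross-endian legacy devices) the ring endianness, then the
 * desc/avail/used mappings and addresses, and finally the kick eventfd
 * (plus disabling the call eventfd when the queue has no guest
 * notifier vector).  Any failure unwinds the mappings through the
 * fail_* labels at the bottom.
 */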
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, false);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, false);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, true);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

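/*
 * vhost_dev_init() ties the pieces together: bind the backend (kernel
 * vhost, vhost-user, ...), take ownership, query features, initialise
 * each virtqueue's masked notifier, install the memory and IOMMU
 * listeners, and register a migration blocker when the backend cannot
 * log writes (no VHOST_F_LOG_ALL).  n_initialized_vqs tracks how many
 * queues were set up so the failure path only cleans up those.
 */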
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            /* report the cleanup error, not the original binding error */
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert (e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

1479 | /* Stop processing guest IO notifications in vhost. | |
1480 | * Start processing them in qemu. | |
1481 | * This might actually run the qemu handlers right away, | |
1482 | * so virtio in qemu must be completely set up when this is called. | |
1483 | */ | |
1484 | void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) | |
1485 | { | |
1c819449 | 1486 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); |
b0b3db79 MT |
1487 | int i, r; |
1488 | ||
1489 | for (i = 0; i < hdev->nvqs; ++i) { | |
b1f0a33d CH |
1490 | r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, |
1491 | false); | |
b0b3db79 | 1492 | if (r < 0) { |
4afba631 | 1493 | error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); |
b0b3db79 MT |
1494 | } |
1495 | assert(r >= 0); | |
76143618 | 1496 | virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); |
b0b3db79 | 1497 | } |
310837de | 1498 | virtio_device_release_ioeventfd(vdev); |
b0b3db79 MT |
1499 | } |
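/*
 * Illustrative sketch (not part of vhost.c): the usual start-side call
 * ordering around the two notifier helpers above, modeled on what virtio
 * device backends do; my_full_start() is an assumed name, and the guest
 * notifiers go through the virtio bus class hook.
 */
static int my_full_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int r;

    r = vhost_dev_enable_notifiers(hdev, vdev);     /* ioeventfds -> vhost */
    if (r < 0) {
        return r;
    }
    r = k->set_guest_notifiers(qbus->parent, hdev->nvqs, true);
    if (r < 0) {
        vhost_dev_disable_notifiers(hdev, vdev);
        return r;
    }
    r = vhost_dev_start(hdev, vdev);
    if (r < 0) {
        k->set_guest_notifiers(qbus->parent, hdev->nvqs, false);
        vhost_dev_disable_notifiers(hdev, vdev);
    }
    return r;
}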
1500 | ||
f56a1247 MT |
1501 | /* Test and clear event pending status. |
1502 | * Should be called after unmask to avoid losing events. | |
1503 | */ | |
1504 | bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) | |
1505 | { | |
a9f98bb5 | 1506 | struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; |
a9f98bb5 | 1507 | assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); |
f56a1247 MT |
1508 | return event_notifier_test_and_clear(&vq->masked_notifier); |
1509 | } | |
1510 | ||
1511 | /* Mask/unmask events from this vq. */ | |
1512 | void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, | |
1513 | bool mask) | |
1514 | { | |
1515 | struct VirtQueue *vvq = virtio_get_queue(vdev, n); | |
a9f98bb5 | 1516 | int r, index = n - hdev->vq_index; |
fc57fd99 | 1517 | struct vhost_vring_file file; |
f56a1247 | 1518 | |
8695de0f MAL |
1519 | /* should only be called after backend is connected */ |
1520 | assert(hdev->vhost_ops); | |
1521 | ||
f56a1247 | 1522 | if (mask) { |
5669655a | 1523 | assert(vdev->use_guest_notifier_mask); |
a9f98bb5 | 1524 | file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); |
f56a1247 MT |
1525 | } else { |
1526 | file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); | |
1527 | } | |
fc57fd99 | 1528 | |
21e70425 MAL |
1529 | file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); |
1530 | r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); | |
162bba7f MAL |
1531 | if (r < 0) { |
1532 | VHOST_OPS_DEBUG("vhost_set_vring_call failed"); | |
1533 | } | |
f56a1247 MT |
1534 | } |
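/*
 * Illustrative sketch (not part of vhost.c): how a virtio device class
 * typically wires the two helpers above into its guest-notifier hooks;
 * my_get_vhost() is a hypothetical accessor that returns the device's
 * struct vhost_dev.
 */
static struct vhost_dev *my_get_vhost(VirtIODevice *vdev); /* hypothetical */

static bool my_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    return vhost_virtqueue_pending(my_get_vhost(vdev), idx);
}

static void my_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    vhost_virtqueue_mask(my_get_vhost(vdev), vdev, idx, mask);
}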
1535 | ||
9a2ba823 CH |
1536 | uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, |
1537 | uint64_t features) | |
2e6d46d7 NN |
1538 | { |
1539 | const int *bit = feature_bits; | |
1540 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1541 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1542 | if (!(hdev->features & bit_mask)) { |
1543 | features &= ~bit_mask; | |
1544 | } | |
1545 | bit++; | |
1546 | } | |
1547 | return features; | |
1548 | } | |
1549 | ||
1550 | void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, | |
9a2ba823 | 1551 | uint64_t features) |
2e6d46d7 NN |
1552 | { |
1553 | const int *bit = feature_bits; | |
1554 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1555 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1556 | if (features & bit_mask) { |
1557 | hdev->acked_features |= bit_mask; | |
1558 | } | |
1559 | bit++; | |
1560 | } | |
1561 | } | |
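/*
 * Illustrative sketch (not part of vhost.c): the feature_bits arrays that
 * the two helpers above walk are plain int arrays terminated by
 * VHOST_INVALID_FEATURE_BIT.  The bits below come from the standard
 * virtio headers; the my_* names and the particular bit selection are
 * assumptions for the example.
 */
static const int my_feature_bits[] = {
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VHOST_INVALID_FEATURE_BIT
};

static uint64_t my_get_features(struct vhost_dev *hdev, uint64_t features)
{
    /* strip anything the vhost backend cannot offer */
    return vhost_get_features(hdev, my_feature_bits, features);
}

static void my_ack_features(struct vhost_dev *hdev, uint64_t features)
{
    /* record what the guest actually negotiated */
    vhost_ack_features(hdev, my_feature_bits, features);
}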
1562 | ||
4c3e257b CL |
1563 | int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, |
1564 | uint32_t config_len) | |
1565 | { | |
1566 | assert(hdev->vhost_ops); | |
1567 | ||
1568 | if (hdev->vhost_ops->vhost_get_config) { | |
1569 | return hdev->vhost_ops->vhost_get_config(hdev, config, config_len); | |
1570 | } | |
1571 | ||
1572 | return -1; | |
1573 | } | |
1574 | ||
1575 | int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, | |
1576 | uint32_t offset, uint32_t size, uint32_t flags) | |
1577 | { | |
1578 | assert(hdev->vhost_ops); | |
1579 | ||
1580 | if (hdev->vhost_ops->vhost_set_config) { | |
1581 | return hdev->vhost_ops->vhost_set_config(hdev, data, offset, | |
1582 | size, flags); | |
1583 | } | |
1584 | ||
1585 | return -1; | |
1586 | } | |
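/*
 * Illustrative sketch (not part of vhost.c): a read-modify-write of one
 * config field through the two wrappers above, in the style of a
 * vhost-user block device toggling writeback caching.  struct
 * virtio_blk_config comes from the standard virtio_blk header and
 * VHOST_SET_CONFIG_TYPE_MASTER from vhost_types.h; my_toggle_wce() is
 * an assumed name.
 */
static int my_toggle_wce(struct vhost_dev *hdev, uint8_t wce)
{
    struct virtio_blk_config cfg;
    int r = vhost_dev_get_config(hdev, (uint8_t *)&cfg, sizeof(cfg));

    if (r < 0) {
        return r;
    }
    cfg.wce = wce;
    /* write back just the one byte, at its offset in config space */
    return vhost_dev_set_config(hdev, &cfg.wce,
                                offsetof(struct virtio_blk_config, wce),
                                sizeof(cfg.wce),
                                VHOST_SET_CONFIG_TYPE_MASTER);
}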
1587 | ||
1588 | void vhost_dev_set_config_notifier(struct vhost_dev *hdev, | |
1589 | const VhostDevConfigOps *ops) | |
1590 | { | |
4c3e257b CL |
1591 | hdev->config_ops = ops; |
1592 | } | |
1593 | ||
5ad204bf XY |
1594 | void vhost_dev_free_inflight(struct vhost_inflight *inflight) |
1595 | { | |
0ac2e635 | 1596 | if (inflight && inflight->addr) { |
5ad204bf XY |
1597 | qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); |
1598 | inflight->addr = NULL; | |
1599 | inflight->fd = -1; | |
1600 | } | |
1601 | } | |
1602 | ||
1603 | static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, | |
1604 | uint64_t new_size) | |
1605 | { | |
1606 | Error *err = NULL; | |
1607 | int fd = -1; | |
1608 | void *addr = qemu_memfd_alloc("vhost-inflight", new_size, | |
1609 | F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, | |
1610 | &fd, &err); | |
1611 | ||
1612 | if (err) { | |
1613 | error_report_err(err); | |
1614 | return -1; | |
1615 | } | |
1616 | ||
1617 | vhost_dev_free_inflight(inflight); | |
1618 | inflight->offset = 0; | |
1619 | inflight->addr = addr; | |
1620 | inflight->fd = fd; | |
1621 | inflight->size = new_size; | |
1622 | ||
1623 | return 0; | |
1624 | } | |
1625 | ||
1626 | void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1627 | { | |
1628 | if (inflight->addr) { | |
1629 | qemu_put_be64(f, inflight->size); | |
1630 | qemu_put_be16(f, inflight->queue_size); | |
1631 | qemu_put_buffer(f, inflight->addr, inflight->size); | |
1632 | } else { | |
1633 | qemu_put_be64(f, 0); | |
1634 | } | |
1635 | } | |
1636 | ||
1637 | int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1638 | { | |
1639 | uint64_t size; | |
1640 | ||
1641 | size = qemu_get_be64(f); | |
1642 | if (!size) { | |
1643 | return 0; | |
1644 | } | |
1645 | ||
1646 | if (inflight->size != size) { | |
1647 | if (vhost_dev_resize_inflight(inflight, size)) { | |
1648 | return -1; | |
1649 | } | |
1650 | } | |
1651 | inflight->queue_size = qemu_get_be16(f); | |
1652 | ||
1653 | qemu_get_buffer(f, inflight->addr, size); | |
1654 | ||
1655 | return 0; | |
1656 | } | |
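/*
 * Illustrative sketch (not part of vhost.c): the stream layout produced
 * by vhost_dev_save_inflight() and consumed by vhost_dev_load_inflight()
 * is simply
 *
 *     be64 size | be16 queue_size | size bytes of inflight buffer
 *
 * (a single be64 of 0 when there is no region), so a device's migration
 * hooks can just delegate; the my_* names are assumptions.
 */
static void my_inflight_save(QEMUFile *f, struct vhost_inflight *inflight)
{
    vhost_dev_save_inflight(inflight, f);
}

static int my_inflight_load(QEMUFile *f, struct vhost_inflight *inflight)
{
    /* grows or shrinks the memfd-backed buffer if the size changed */
    return vhost_dev_load_inflight(inflight, f);
}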
1657 | ||
1658 | int vhost_dev_set_inflight(struct vhost_dev *dev, | |
1659 | struct vhost_inflight *inflight) | |
1660 | { | |
1661 | int r; | |
1662 | ||
1663 | if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { | |
1664 | r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); | |
1665 | if (r) { | |
1666 | VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); | |
1667 | return -errno; | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | return 0; | |
1672 | } | |
1673 | ||
1674 | int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, | |
1675 | struct vhost_inflight *inflight) | |
1676 | { | |
1677 | int r; | |
1678 | ||
1679 | if (dev->vhost_ops->vhost_get_inflight_fd) { | |
1680 | r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); | |
1681 | if (r) { | |
1682 | VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); | |
1683 | return -errno; | |
1684 | } | |
1685 | } | |
1686 | ||
1687 | return 0; | |
1688 | } | |
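/*
 * Illustrative sketch (not part of vhost.c): a vhost-user style device
 * typically fetches the inflight region once, then hands it back to the
 * backend on every (re)start so in-flight requests survive a backend
 * reconnect; my_setup_inflight() is an assumed name, queue_size and the
 * inflight pointer belong to the caller.
 */
static int my_setup_inflight(struct vhost_dev *hdev,
                             struct vhost_inflight *inflight,
                             uint16_t queue_size)
{
    int r;

    if (!inflight->addr) {                          /* first start only */
        r = vhost_dev_get_inflight(hdev, queue_size, inflight);
        if (r < 0) {
            return r;
        }
    }
    return vhost_dev_set_inflight(hdev, inflight);
}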
1689 | ||
b0b3db79 | 1690 | /* Host notifiers must be enabled at this point. */ |
d5970055 MT |
1691 | int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) |
1692 | { | |
1693 | int i, r; | |
24f4fe34 | 1694 | |
8695de0f MAL |
1695 | /* should only be called after backend is connected */ |
1696 | assert(hdev->vhost_ops); | |
1697 | ||
24f4fe34 | 1698 | hdev->started = true; |
c471ad0e | 1699 | hdev->vdev = vdev; |
24f4fe34 | 1700 | |
d5970055 MT |
1701 | r = vhost_dev_set_features(hdev, hdev->log_enabled); |
1702 | if (r < 0) { | |
54dd9321 | 1703 | goto fail_features; |
d5970055 | 1704 | } |
c471ad0e JW |
1705 | |
1706 | if (vhost_dev_has_iommu(hdev)) { | |
375f74f4 | 1707 | memory_listener_register(&hdev->iommu_listener, vdev->dma_as); |
c471ad0e JW |
1708 | } |
1709 | ||
21e70425 | 1710 | r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); |
d5970055 | 1711 | if (r < 0) { |
c6409692 | 1712 | VHOST_OPS_DEBUG("vhost_set_mem_table failed"); |
d5970055 | 1713 | r = -errno; |
54dd9321 | 1714 | goto fail_mem; |
d5970055 | 1715 | } |
d154e0ba | 1716 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 1717 | r = vhost_virtqueue_start(hdev, |
a9f98bb5 JW |
1718 | vdev, |
1719 | hdev->vqs + i, | |
1720 | hdev->vq_index + i); | |
d154e0ba MT |
1721 | if (r < 0) { |
1722 | goto fail_vq; | |
1723 | } | |
1724 | } | |
1725 | ||
d5970055 | 1726 | if (hdev->log_enabled) { |
e05ca820 MT |
1727 | uint64_t log_base; |
1728 | ||
d5970055 | 1729 | hdev->log_size = vhost_get_log_size(hdev); |
15324404 MAL |
1730 | hdev->log = vhost_log_get(hdev->log_size, |
1731 | vhost_dev_log_is_shared(hdev)); | |
309750fa | 1732 | log_base = (uintptr_t)hdev->log->log; |
c2bea314 | 1733 | r = hdev->vhost_ops->vhost_set_log_base(hdev, |
9a78a5dd MAL |
1734 | hdev->log_size ? log_base : 0, |
1735 | hdev->log); | |
d5970055 | 1736 | if (r < 0) { |
c6409692 | 1737 | VHOST_OPS_DEBUG("vhost_set_log_base failed"); |
d5970055 | 1738 | r = -errno; |
54dd9321 | 1739 | goto fail_log; |
d5970055 MT |
1740 | } |
1741 | } | |
ca71db43 CL |
1742 | if (hdev->vhost_ops->vhost_dev_start) { |
1743 | r = hdev->vhost_ops->vhost_dev_start(hdev, true); | |
1744 | if (r) { | |
1745 | goto fail_log; | |
1746 | } | |
1747 | } | |
3f63b4c6 JW |
1748 | if (vhost_dev_has_iommu(hdev) && |
1749 | hdev->vhost_ops->vhost_set_iotlb_callback) { | |
1750 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); | |
c471ad0e JW |
1751 | |
1752 | /* Update used ring information for IOTLB to work correctly; |
1753 | * the vhost-kernel code requires this. */ | |
1754 | for (i = 0; i < hdev->nvqs; ++i) { | |
1755 | struct vhost_virtqueue *vq = hdev->vqs + i; | |
1756 | vhost_device_iotlb_miss(hdev, vq->used_phys, true); | |
1757 | } | |
1758 | } | |
d5970055 | 1759 | return 0; |
54dd9321 | 1760 | fail_log: |
24bfa207 | 1761 | vhost_log_put(hdev, false); |
d5970055 MT |
1762 | fail_vq: |
1763 | while (--i >= 0) { | |
f56a1247 | 1764 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
1765 | vdev, |
1766 | hdev->vqs + i, | |
1767 | hdev->vq_index + i); | |
d5970055 | 1768 | } |
c471ad0e | 1769 | |
54dd9321 MT |
1770 | fail_mem: |
1771 | fail_features: | |
24f4fe34 MT |
1772 | |
1773 | hdev->started = false; | |
d5970055 MT |
1774 | return r; |
1775 | } | |
1776 | ||
b0b3db79 | 1777 | /* Host notifiers must be enabled at this point. */ |
d5970055 MT |
1778 | void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) |
1779 | { | |
a9f98bb5 | 1780 | int i; |
54dd9321 | 1781 | |
8695de0f MAL |
1782 | /* should only be called after backend is connected */ |
1783 | assert(hdev->vhost_ops); | |
1784 | ||
ca71db43 CL |
1785 | if (hdev->vhost_ops->vhost_dev_start) { |
1786 | hdev->vhost_ops->vhost_dev_start(hdev, false); | |
1787 | } | |
d5970055 | 1788 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 1789 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
1790 | vdev, |
1791 | hdev->vqs + i, | |
1792 | hdev->vq_index + i); | |
d5970055 | 1793 | } |
54dd9321 | 1794 | |
c471ad0e | 1795 | if (vhost_dev_has_iommu(hdev)) { |
3f63b4c6 JW |
1796 | if (hdev->vhost_ops->vhost_set_iotlb_callback) { |
1797 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); | |
1798 | } | |
375f74f4 | 1799 | memory_listener_unregister(&hdev->iommu_listener); |
c471ad0e | 1800 | } |
309750fa | 1801 | vhost_log_put(hdev, true); |
d5970055 | 1802 | hdev->started = false; |
c471ad0e | 1803 | hdev->vdev = NULL; |
d5970055 | 1804 | } |
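/*
 * Illustrative sketch (not part of vhost.c): the stop side, unwinding in
 * the reverse order of the my_full_start() sketch earlier (assumed name).
 */
static void my_full_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    vhost_dev_stop(hdev, vdev);                     /* rings first */
    if (k->set_guest_notifiers(qbus->parent, hdev->nvqs, false) < 0) {
        error_report("vhost guest notifier cleanup failed");
    }
    vhost_dev_disable_notifiers(hdev, vdev);        /* ioeventfds last */
}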
950d94ba MAL |
1805 | |
1806 | int vhost_net_set_backend(struct vhost_dev *hdev, | |
1807 | struct vhost_vring_file *file) | |
1808 | { | |
1809 | if (hdev->vhost_ops->vhost_net_set_backend) { | |
1810 | return hdev->vhost_ops->vhost_net_set_backend(hdev, file); | |
1811 | } | |
1812 | ||
1813 | return -1; | |
1814 | } |
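/*
 * Illustrative sketch (not part of vhost.c): for vhost-net the backend
 * file is the tap fd, attached to every queue after start and detached
 * by re-sending fd = -1 before stop; my_attach_tap() and tap_fd are
 * assumptions, and file.index is relative to this device's queues.
 */
static int my_attach_tap(struct vhost_dev *hdev, int tap_fd)
{
    struct vhost_vring_file file = { .fd = tap_fd };
    int r;

    for (file.index = 0; file.index < hdev->nvqs; ++file.index) {
        r = vhost_net_set_backend(hdev, &file);
        if (r < 0) {
            return r;
        }
    }
    return 0;
}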