/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(retval, fmt, ...) \
    do { \
        error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                     strerror(-retval), -retval); \
    } while (0)
#else
#define VHOST_OPS_DEBUG(retval, fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

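/*
 * Report whether any registered vhost device still has a free memory
 * slot: the tightest backend limit across all devices is compared
 * against the number of slots currently in use.
 */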
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

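/*
 * Sync dirty pages from the vhost log into QEMU's dirty bitmap for the
 * part of @section that intersects both [mfirst, mlast] and
 * [rfirst, rlast]. Each set bit in a log chunk marks one
 * VHOST_LOG_PAGE-sized page as dirty.
 */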
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = qatomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports
     * an incremental memory mapping API via the IOTLB API. For platforms
     * that do not have an IOMMU, there is no need to enable this feature,
     * which may cause unnecessary IOTLB miss/update transactions.
     */
    if (vdev) {
        return virtio_bus_device_iommu_enabled(vdev) &&
            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    } else {
        return false;
    }
}

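/*
 * Walk all memory regions and used rings of @dev and sync any dirty
 * log bits that fall inside @section, clipped to [first, last]. Used
 * rings that sit behind a vIOMMU are translated one IOMMU page at a
 * time.
 */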
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (!vq->used_phys && !vq->used_size) {
            continue;
        }

        if (vhost_dev_has_iommu(dev)) {
            IOMMUTLBEntry iotlb;
            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
            hwaddr phys, s, offset;

            while (used_size) {
                rcu_read_lock();
                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                                      used_phys,
                                                      true,
                                                      MEMTXATTRS_UNSPECIFIED);
                rcu_read_unlock();

                if (!iotlb.target_as) {
                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
                                  "failure for used_iova %"PRIx64"\n",
                                  used_phys);
                    return -EINVAL;
                }

                offset = used_phys & iotlb.addr_mask;
                phys = iotlb.translated_addr + offset;

                /*
                 * Distance from start of used ring until last byte of
                 * IOMMU page.
                 */
                s = iotlb.addr_mask - offset;
                /*
                 * Size of used ring, or of the part of it until end
                 * of IOMMU page. To avoid zero result, do the adding
                 * outside of MIN().
                 */
                s = MIN(s, used_size - 1) + 1;

                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
                                      range_get_last(phys, s));
                used_size -= s;
                used_phys += s;
            }
        } else {
            vhost_dev_sync_region(dev, section, start_addr,
                                  end_addr, vq->used_phys,
                                  range_get_last(vq->used_phys, vq->used_size));
        }
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

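/*
 * The log must cover the highest guest physical address of any memory
 * region, measured in VHOST_LOG_CHUNK units.
 */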
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static int vhost_set_backend_type(struct vhost_dev *dev,
                                  VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
#ifdef CONFIG_VHOST_VDPA
    case VHOST_BACKEND_TYPE_VDPA:
        dev->vhost_ops = &vdpa_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

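/*
 * Logs are cached globally (one shared, one private) and refcounted;
 * a cached log is reused only when the requested size matches.
 */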
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, bool is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

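/*
 * Check one ring part (descriptor table, available or used ring)
 * against a memory region: returns 0 if they are disjoint or correctly
 * mapped, -ENOMEM if the ring sticks out of the region, -EBUSY if its
 * HVA mapping changed.
 */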
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that the ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

/*
 * vhost_section: identify sections needed for vhost access
 *
 * We only care about RAM sections here (where virtqueue and guest
 * internals accessed by virtio might live). If we find one we still
 * allow the backend to potentially filter it out of our list.
 */
static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
        uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
        uint8_t handled_dirty;

        /*
         * Kernel-based vhost doesn't handle any block which is doing
         * dirty-tracking other than migration, for which it has
         * specific logging support. For TCG the kernel never gets
         * involved anyway, so we can also ignore its self-modifying
         * code detection flags. However, a vhost-user client could
         * still confuse a TCG guest if it re-writes executable memory
         * that has already been translated.
         */
        handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
            (1 << DIRTY_MEMORY_CODE);

        if (dirty_mask & ~handled_dirty) {
            trace_vhost_reject_section(mr->name, 1);
            return false;
        }

        if (dev->vhost_ops->vhost_backend_mem_section_filter &&
            !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
            trace_vhost_reject_section(mr->name, 2);
            return false;
        }

        trace_vhost_section(mr->name);
        return true;
    } else {
        trace_vhost_reject_section(mr->name, 3);
        return false;
    }
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

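/*
 * Called after a flatview update: swap in the section list built by
 * vhost_region_addnop(), and if it actually changed, rebuild the
 * region table and push it to the backend, resizing the dirty log
 * around the update when logging is enabled.
 */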
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, lets check the contents */
        for (int i = 0; i < n_old_sections; i++) {
            if (!MemoryRegionSection_eq(&old_sections[i],
                                        &dev->mem_sections[i])) {
                changed = true;
                break;
            }
        }
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size = int128_get64(mrs->size);
        cur_vmr->userspace_addr =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
        /* Round the section to its page size */
        /* First align the start down to a page boundary */
        size_t mrs_page = qemu_ram_pagesize(mrs_rb);
        uint64_t alignage = mrs_host & (mrs_page - 1);
        if (alignage) {
            mrs_host -= alignage;
            mrs_size += alignage;
            mrs_gpa -= alignage;
        }
        /* Now align the size up to a page boundary */
        alignage = mrs_size & (mrs_page - 1);
        if (alignage) {
            mrs_size += mrs_page - alignage;
        }
        trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
                                               mrs_size, mrs_host);
    }

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, lets see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they
             * should match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section '%s' rounded to %"PRIx64
                             " prior to previous '%s' %"PRIx64,
                             __func__, section->mr->name, mrs_gpa,
                             prev_sec->mr->name, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                                             mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(dev, section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        dev->vdev->device_iotlb_enabled ?
                            IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
                            IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n,
                                          &error_fatal);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

void vhost_toggle_device_iotlb(VirtIODevice *vdev)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    struct vhost_dev *dev;
    struct vhost_iommu *iommu;

    if (vdev->vhost_started) {
        dev = vdc->get_vhost(vdev);
    } else {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
        iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
                IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
        memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
                                              &error_fatal);
    }
}

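/*
 * Tell the backend where the descriptor table, available and used
 * rings of @vq live, and whether writes to the used ring must be
 * logged.
 */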
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr;
    int r;
    memset(&addr, 0, sizeof(struct vhost_vring_addr));

    if (dev->vhost_ops->vhost_vq_get_addr) {
        r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
            return r;
        }
    } else {
        addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
        addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
        addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
    }
    addr.index = idx;
    addr.log_guest_addr = vq->used_phys;
    addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
    r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
    }
    return r;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    if (!vhost_dev_has_iommu(dev)) {
        features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
    }
    if (dev->vhost_ops->vhost_force_iommu) {
        if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
            features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
        }
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_features failed");
        goto out;
    }
    if (dev->vhost_ops->vhost_set_backend_cap) {
        r = dev->vhost_ops->vhost_set_backend_cap(dev);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
            goto out;
        }
    }

out:
    return r;
}

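/*
 * Switch dirty logging on or off for every started virtqueue; on
 * failure, roll the already-updated queues back to the previous state.
 */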
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    hwaddr addr;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        if (!addr) {
            /*
             * The queue might not be ready for start. If this
             * is the case there is no reason to continue the process.
             * Similar logic is used by the vhost_virtqueue_start()
             * routine.
             */
            continue;
        }
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        if (!addr) {
            continue;
        }
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

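/*
 * Enable or disable the dirty log for migration. On a stopped device
 * only the flag is recorded; on a started device the log is allocated
 * (or released) and the backend is reconfigured.
 */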
static int vhost_migration_log(MemoryListener *listener, bool enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }

    r = 0;
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            goto check_dev_state;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            goto check_dev_state;
        }
    }

check_dev_state:
    dev->log_enabled = enable;
    /*
     * vhost-user-* devices could change their state during log
     * initialization due to disconnect. So check dev state after
     * vhost communication.
     */
    if (!dev->started) {
        /*
         * Since device is in the stopped state, it is okay for
         * migration. Return success.
         */
        r = 0;
    }
    if (r) {
        /* An error occurred. */
        dev->log_enabled = false;
    }

    return r;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of
 * non-cross-endian legacy devices and modern devices. Only legacy
 * devices exposed to a bi-endian guest may require the vhost driver
 * to use a specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#if HOST_BIG_ENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    int r;
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
    }
    return r;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    RCU_READ_LOCK_GUARD();

    trace_vhost_iotlb_miss(dev, 1);

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write,
                                          MEMTXATTRS_UNSPECIFIED);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 3);
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 4);
            error_report("Failed to update device iotlb");
            goto out;
        }
    }

    trace_vhost_iotlb_miss(dev, 2);

out:
    return ret;
}

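/*
 * Start one virtqueue in the backend: program ring size, base index
 * and (for cross-endian legacy devices) endianness, map the three ring
 * parts, and wire up the kick and call eventfds.
 */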
int vhost_virtqueue_start(struct vhost_dev *dev,
                          struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq,
                          unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
        return r;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
        return r;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return r;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, false);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, false);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, true);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

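/*
 * Stop one virtqueue: fetch the last avail index back from the backend
 * (or restore it from the used index if the backend is gone), reset
 * the vring endianness if needed and unmap the rings.
 */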
void vhost_virtqueue_stop(struct vhost_dev *dev,
                          struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq,
                          unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * to native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static void vhost_virtqueue_error_notifier(EventNotifier *n)
{
    struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
                                              error_notifier);
    struct vhost_dev *dev = vq->dev;
    int index = vq - dev->vqs;

    if (event_notifier_test_and_clear(n) && dev->vdev) {
        VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
                        dev->vq_index + index);
    }
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_wfd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
        goto fail_call;
    }

    vq->dev = dev;

    if (dev->vhost_ops->vhost_set_vring_err) {
        r = event_notifier_init(&vq->error_notifier, 0);
        if (r < 0) {
            goto fail_call;
        }

        file.fd = event_notifier_get_fd(&vq->error_notifier);
        r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
        if (r) {
            VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
            goto fail_err;
        }

        event_notifier_set_handler(&vq->error_notifier,
                                   vhost_virtqueue_error_notifier);
    }

    return 0;

fail_err:
    event_notifier_cleanup(&vq->error_notifier);
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
    if (vq->dev->vhost_ops->vhost_set_vring_err) {
        event_notifier_set_handler(&vq->error_notifier, NULL);
        event_notifier_cleanup(&vq->error_notifier);
    }
}

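/*
 * One-time initialization of a vhost device: pick the backend, take
 * ownership, query features, init the virtqueues, register the memory
 * listeners and, if the backend can't log writes, block migration.
 */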
81647a65 | 1399 | int vhost_dev_init(struct vhost_dev *hdev, void *opaque, |
a6945f22 KW |
1400 | VhostBackendType backend_type, uint32_t busyloop_timeout, |
1401 | Error **errp) | |
d5970055 MT |
1402 | { |
1403 | uint64_t features; | |
a06db3ec | 1404 | int i, r, n_initialized_vqs = 0; |
81647a65 | 1405 | |
c471ad0e | 1406 | hdev->vdev = NULL; |
d2fc4402 MAL |
1407 | hdev->migration_blocker = NULL; |
1408 | ||
7cb8a9b9 MAL |
1409 | r = vhost_set_backend_type(hdev, backend_type); |
1410 | assert(r >= 0); | |
1a1bfac9 | 1411 | |
28770ff9 | 1412 | r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp); |
7cb8a9b9 MAL |
1413 | if (r < 0) { |
1414 | goto fail; | |
24d1eb33 NN |
1415 | } |
1416 | ||
21e70425 | 1417 | r = hdev->vhost_ops->vhost_set_owner(hdev); |
d5970055 | 1418 | if (r < 0) { |
f2a6e6c4 | 1419 | error_setg_errno(errp, -r, "vhost_set_owner failed"); |
d5970055 MT |
1420 | goto fail; |
1421 | } | |
1422 | ||
21e70425 | 1423 | r = hdev->vhost_ops->vhost_get_features(hdev, &features); |
d5970055 | 1424 | if (r < 0) { |
f2a6e6c4 | 1425 | error_setg_errno(errp, -r, "vhost_get_features failed"); |
d5970055 MT |
1426 | goto fail; |
1427 | } | |
f56a1247 | 1428 | |
a06db3ec | 1429 | for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { |
b931bfbf | 1430 | r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); |
f56a1247 | 1431 | if (r < 0) { |
a6945f22 | 1432 | error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i); |
a06db3ec | 1433 | goto fail; |
f56a1247 MT |
1434 | } |
1435 | } | |
69e87b32 JW |
1436 | |
1437 | if (busyloop_timeout) { | |
1438 | for (i = 0; i < hdev->nvqs; ++i) { | |
1439 | r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, | |
1440 | busyloop_timeout); | |
1441 | if (r < 0) { | |
f2a6e6c4 | 1442 | error_setg_errno(errp, -r, "Failed to set busyloop timeout"); |
69e87b32 JW |
1443 | goto fail_busyloop; |
1444 | } | |
1445 | } | |
1446 | } | |
1447 | ||
d5970055 MT |
1448 | hdev->features = features; |
1449 | ||
04097f7c | 1450 | hdev->memory_listener = (MemoryListener) { |
142518bd | 1451 | .name = "vhost", |
50c1e149 AK |
1452 | .begin = vhost_begin, |
1453 | .commit = vhost_commit, | |
938eeb64 DDAG |
1454 | .region_add = vhost_region_addnop, |
1455 | .region_nop = vhost_region_addnop, | |
04097f7c AK |
1456 | .log_start = vhost_log_start, |
1457 | .log_stop = vhost_log_stop, | |
1458 | .log_sync = vhost_log_sync, | |
1459 | .log_global_start = vhost_log_global_start, | |
1460 | .log_global_stop = vhost_log_global_stop, | |
8be0461d | 1461 | .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND |
04097f7c | 1462 | }; |
d2fc4402 | 1463 | |
375f74f4 | 1464 | hdev->iommu_listener = (MemoryListener) { |
142518bd | 1465 | .name = "vhost-iommu", |
375f74f4 JW |
1466 | .region_add = vhost_iommu_region_add, |
1467 | .region_del = vhost_iommu_region_del, | |
1468 | }; | |
c471ad0e | 1469 | |
d2fc4402 MAL |
1470 | if (hdev->migration_blocker == NULL) { |
1471 | if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { | |
1472 | error_setg(&hdev->migration_blocker, | |
1473 | "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); | |
648abbfb | 1474 | } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) { |
31190ed7 MAL |
1475 | error_setg(&hdev->migration_blocker, |
1476 | "Migration disabled: failed to allocate shared memory"); | |
d2fc4402 MAL |
1477 | } |
1478 | } | |
1479 | ||
1480 | if (hdev->migration_blocker != NULL) { | |
28770ff9 | 1481 | r = migrate_add_blocker(hdev->migration_blocker, errp); |
436c831a | 1482 | if (r < 0) { |
fe44dc91 AA |
1483 | error_free(hdev->migration_blocker); |
1484 | goto fail_busyloop; | |
1485 | } | |
7145872e | 1486 | } |
d2fc4402 | 1487 | |
7267c094 | 1488 | hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); |
2817b260 AK |
1489 | hdev->n_mem_sections = 0; |
1490 | hdev->mem_sections = NULL; | |
d5970055 MT |
1491 | hdev->log = NULL; |
1492 | hdev->log_size = 0; | |
1493 | hdev->log_enabled = false; | |
1494 | hdev->started = false; | |
f6790af6 | 1495 | memory_listener_register(&hdev->memory_listener, &address_space_memory); |
5be5f9be | 1496 | QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); |
9e2a2a3e JZ |
1497 | |
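| /* | |
| * used_memslots is updated via the memory listener, so this sanity | |
| * check is only meaningful after memory_listener_register() above. | |
| */ | |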
1498 | if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { | |
a6945f22 KW |
1499 | error_setg(errp, "vhost backend memory slots limit is less" |
1500 | " than current number of present memory slots"); | |
f2a6e6c4 | 1501 | r = -EINVAL; |
1d8d014e | 1502 | goto fail_busyloop; |
9e2a2a3e JZ |
1503 | } |
1504 | ||
d5970055 | 1505 | return 0; |
a06db3ec | 1506 | |
69e87b32 | 1507 | fail_busyloop: |
1d8d014e SH |
1508 | if (busyloop_timeout) { |
1509 | while (--i >= 0) { | |
1510 | vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); | |
1511 | } | |
69e87b32 | 1512 | } |
d5970055 | 1513 | fail: |
a06db3ec MAL |
1514 | hdev->nvqs = n_initialized_vqs; |
1515 | vhost_dev_cleanup(hdev); | |
d5970055 MT |
1516 | return r; |
1517 | } | |
1518 | ||
1519 | void vhost_dev_cleanup(struct vhost_dev *hdev) | |
1520 | { | |
f56a1247 | 1521 | int i; |
e0547b59 | 1522 | |
a2761231 AB |
1523 | trace_vhost_dev_cleanup(hdev); |
1524 | ||
f56a1247 MT |
1525 | for (i = 0; i < hdev->nvqs; ++i) { |
1526 | vhost_virtqueue_cleanup(hdev->vqs + i); | |
1527 | } | |
5be5f9be MAL |
1528 | if (hdev->mem) { |
1529 | /* these are only safe after a successful init */ | |
1530 | memory_listener_unregister(&hdev->memory_listener); | |
1531 | QLIST_REMOVE(hdev, entry); | |
1532 | } | |
7145872e MT |
1533 | if (hdev->migration_blocker) { |
1534 | migrate_del_blocker(hdev->migration_blocker); | |
1535 | error_free(hdev->migration_blocker); | |
1536 | } | |
7267c094 | 1537 | g_free(hdev->mem); |
2817b260 | 1538 | g_free(hdev->mem_sections); |
e0547b59 MAL |
1539 | if (hdev->vhost_ops) { |
1540 | hdev->vhost_ops->vhost_backend_cleanup(hdev); | |
1541 | } | |
7b527247 | 1542 | assert(!hdev->log); |
e0547b59 MAL |
1543 | |
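| /* | |
| * Wipe the struct so no stale pointer (vhost_ops, mem, log, ...) can | |
| * be mistaken for live state if the device is set up again later. | |
| */ | |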
1544 | memset(hdev, 0, sizeof(struct vhost_dev)); | |
d5970055 MT |
1545 | } |
1546 | ||
92099aa4 LV |
1547 | static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, |
1548 | VirtIODevice *vdev, | |
1549 | unsigned int nvqs) | |
1550 | { | |
1551 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); | |
1552 | int i, r; | |
1553 | ||
1554 | /* | |
1555 | * Batch all the host notifiers in a single transaction to avoid | |
1556 | * quadratic time complexity in address_space_update_ioeventfds(). | |
1557 | */ | |
1558 | memory_region_transaction_begin(); | |
1559 | ||
1560 | for (i = 0; i < nvqs; ++i) { | |
1561 | r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, | |
1562 | false); | |
1563 | if (r < 0) { | |
1564 | error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); | |
1565 | } | |
1566 | assert(r >= 0); | |
1567 | } | |
1568 | ||
1569 | /* | |
1570 | * The transaction expects the ioeventfds to be open when it | |
1571 | * commits. Do it now, before the cleanup loop. | |
1572 | */ | |
1573 | memory_region_transaction_commit(); | |
1574 | ||
1575 | for (i = 0; i < nvqs; ++i) { | |
1576 | virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); | |
1577 | } | |
1578 | virtio_device_release_ioeventfd(vdev); | |
1579 | } | |
1580 | ||
b0b3db79 MT |
1581 | /* Stop processing guest IO notifications in qemu. |
1582 | * Start processing them in vhost in the kernel. | |
1583 | */ | |
1584 | int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) | |
1585 | { | |
1c819449 | 1586 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); |
8771589b | 1587 | int i, r; |
4afba631 | 1588 | |
310837de PB |
1589 | /* We will pass the notifiers to the kernel; make sure that QEMU | |
1590 | * doesn't interfere. | |
1591 | */ | |
1592 | r = virtio_device_grab_ioeventfd(vdev); | |
1593 | if (r < 0) { | |
4afba631 | 1594 | error_report("binding does not support host notifiers"); |
8771589b | 1595 | return r; |
b0b3db79 MT |
1596 | } |
1597 | ||
0fdc6b85 LM |
1598 | /* |
1599 | * Batch all the host notifiers in a single transaction to avoid | |
1600 | * quadratic time complexity in address_space_update_ioeventfds(). | |
1601 | */ | |
1602 | memory_region_transaction_begin(); | |
1603 | ||
b0b3db79 | 1604 | for (i = 0; i < hdev->nvqs; ++i) { |
b1f0a33d CH |
1605 | r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, |
1606 | true); | |
b0b3db79 | 1607 | if (r < 0) { |
4afba631 | 1608 | error_report("vhost VQ %d notifier binding failed: %d", i, -r); |
0fdc6b85 | 1609 | memory_region_transaction_commit(); |
92099aa4 | 1610 | vhost_dev_disable_notifiers_nvqs(hdev, vdev, i); |
8771589b | 1611 | return r; |
b0b3db79 MT |
1612 | } |
1613 | } | |
1614 | ||
0fdc6b85 LM |
1615 | memory_region_transaction_commit(); |
1616 | ||
b0b3db79 | 1617 | return 0; |
b0b3db79 MT |
1618 | } |
1619 | ||
1620 | /* Stop processing guest IO notifications in vhost. | |
1621 | * Start processing them in qemu. | |
1622 | * This might actually run the qemu handlers right away, | |
1623 | * so virtio in qemu must be completely set up when this is called. | |
1624 | */ | |
1625 | void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) | |
1626 | { | |
92099aa4 | 1627 | vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs); |
b0b3db79 MT |
1628 | } |
1629 | ||
f56a1247 MT |
1630 | /* Test and clear event pending status. |
1631 | * Should be called after unmask to avoid losing events. | |
1632 | */ | |
1633 | bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) | |
1634 | { | |
a9f98bb5 | 1635 | struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; |
a9f98bb5 | 1636 | assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); |
f56a1247 MT |
1637 | return event_notifier_test_and_clear(&vq->masked_notifier); |
1638 | } | |
1639 | ||
1640 | /* Mask/unmask events from this vq. */ | |
1641 | void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, | |
1642 | bool mask) | |
1643 | { | |
1644 | struct VirtQueue *vvq = virtio_get_queue(vdev, n); | |
a9f98bb5 | 1645 | int r, index = n - hdev->vq_index; |
fc57fd99 | 1646 | struct vhost_vring_file file; |
f56a1247 | 1647 | |
8695de0f MAL |
1648 | /* should only be called after backend is connected */ |
1649 | assert(hdev->vhost_ops); | |
1650 | ||
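| /* | |
| * Masking redirects the vq's call eventfd to the QEMU-internal | |
| * masked_notifier, where events are latched for | |
| * vhost_virtqueue_pending(); unmasking points it back at the guest | |
| * notifier. | |
| */ | |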
f56a1247 | 1651 | if (mask) { |
5669655a | 1652 | assert(vdev->use_guest_notifier_mask); |
ff5eb77b | 1653 | file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier); |
f56a1247 | 1654 | } else { |
ff5eb77b | 1655 | file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq)); |
f56a1247 | 1656 | } |
fc57fd99 | 1657 | |
21e70425 MAL |
1658 | file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); |
1659 | r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); | |
162bba7f | 1660 | if (r < 0) { |
f9a09ca3 CL |
1661 | error_report("vhost_set_vring_call failed %d", -r); |
1662 | } | |
1663 | } | |
1664 | ||
1665 | bool vhost_config_pending(struct vhost_dev *hdev) | |
1666 | { | |
1667 | assert(hdev->vhost_ops); | |
1668 | if ((hdev->started == false) || | |
1669 | (hdev->vhost_ops->vhost_set_config_call == NULL)) { | |
1670 | return false; | |
1671 | } | |
1672 | ||
1673 | EventNotifier *notifier = | |
1674 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; | |
1675 | return event_notifier_test_and_clear(notifier); | |
1676 | } | |
1677 | ||
1678 | void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask) | |
1679 | { | |
1680 | int fd; | |
1681 | int r; | |
1682 | EventNotifier *notifier = | |
1683 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; | |
1684 | EventNotifier *config_notifier = &vdev->config_notifier; | |
1685 | assert(hdev->vhost_ops); | |
1686 | ||
1687 | if ((hdev->started == false) || | |
1688 | (hdev->vhost_ops->vhost_set_config_call == NULL)) { | |
1689 | return; | |
1690 | } | |
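| /* | |
| * Same redirection as vhost_virtqueue_mask(), but for the config-change | |
| * interrupt: while masked, events land in masked_config_notifier and | |
| * are picked up later by vhost_config_pending(). | |
| */ | |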
1691 | if (mask) { | |
1692 | assert(vdev->use_guest_notifier_mask); | |
1693 | fd = event_notifier_get_fd(notifier); | |
1694 | } else { | |
1695 | fd = event_notifier_get_fd(config_notifier); | |
1696 | } | |
1697 | r = hdev->vhost_ops->vhost_set_config_call(hdev, fd); | |
1698 | if (r < 0) { | |
1699 | error_report("vhost_set_config_call failed %d", -r); | |
1700 | } | |
1701 | } | |
1702 | ||
1703 | static void vhost_stop_config_intr(struct vhost_dev *dev) | |
1704 | { | |
1705 | int fd = -1; | |
1706 | assert(dev->vhost_ops); | |
1707 | if (dev->vhost_ops->vhost_set_config_call) { | |
1708 | dev->vhost_ops->vhost_set_config_call(dev, fd); | |
1709 | } | |
1710 | } | |
1711 | ||
1712 | static void vhost_start_config_intr(struct vhost_dev *dev) | |
1713 | { | |
1714 | int r; | |
1715 | ||
1716 | assert(dev->vhost_ops); | |
1717 | int fd = event_notifier_get_fd(&dev->vdev->config_notifier); | |
1718 | if (dev->vhost_ops->vhost_set_config_call) { | |
1719 | r = dev->vhost_ops->vhost_set_config_call(dev, fd); | |
1720 | if (!r) { | |
1721 | event_notifier_set(&dev->vdev->config_notifier); | |
1722 | } | |
162bba7f | 1723 | } |
f56a1247 MT |
1724 | } |
1725 | ||
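| /* | |
| * Strip from 'features' every bit listed in feature_bits that the | |
| * backend does not offer; the list is terminated by | |
| * VHOST_INVALID_FEATURE_BIT. | |
| */ | |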
9a2ba823 CH |
1726 | uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, |
1727 | uint64_t features) | |
2e6d46d7 NN |
1728 | { |
1729 | const int *bit = feature_bits; | |
1730 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1731 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1732 | if (!(hdev->features & bit_mask)) { |
1733 | features &= ~bit_mask; | |
1734 | } | |
1735 | bit++; | |
1736 | } | |
1737 | return features; | |
1738 | } | |
1739 | ||
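| /* Record which of the listed feature bits the guest has accepted. */ | |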
1740 | void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, | |
9a2ba823 | 1741 | uint64_t features) |
2e6d46d7 NN |
1742 | { |
1743 | const int *bit = feature_bits; | |
1744 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1745 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1746 | if (features & bit_mask) { |
1747 | hdev->acked_features |= bit_mask; | |
1748 | } | |
1749 | bit++; | |
1750 | } | |
1751 | } | |
1752 | ||
4c3e257b | 1753 | int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, |
50de5138 | 1754 | uint32_t config_len, Error **errp) |
4c3e257b CL |
1755 | { |
1756 | assert(hdev->vhost_ops); | |
1757 | ||
1758 | if (hdev->vhost_ops->vhost_get_config) { | |
66647ed4 MA |
1759 | return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, |
1760 | errp); | |
4c3e257b CL |
1761 | } |
1762 | ||
50de5138 | 1763 | error_setg(errp, "vhost_get_config not implemented"); |
5d33ae4b | 1764 | return -ENOSYS; |
4c3e257b CL |
1765 | } |
1766 | ||
1767 | int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, | |
1768 | uint32_t offset, uint32_t size, uint32_t flags) | |
1769 | { | |
1770 | assert(hdev->vhost_ops); | |
1771 | ||
1772 | if (hdev->vhost_ops->vhost_set_config) { | |
1773 | return hdev->vhost_ops->vhost_set_config(hdev, data, offset, | |
1774 | size, flags); | |
1775 | } | |
1776 | ||
5d33ae4b | 1777 | return -ENOSYS; |
4c3e257b CL |
1778 | } |
1779 | ||
1780 | void vhost_dev_set_config_notifier(struct vhost_dev *hdev, | |
1781 | const VhostDevConfigOps *ops) | |
1782 | { | |
4c3e257b CL |
1783 | hdev->config_ops = ops; |
1784 | } | |
1785 | ||
5ad204bf XY |
1786 | void vhost_dev_free_inflight(struct vhost_inflight *inflight) |
1787 | { | |
0ac2e635 | 1788 | if (inflight && inflight->addr) { |
5ad204bf XY |
1789 | qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); |
1790 | inflight->addr = NULL; | |
1791 | inflight->fd = -1; | |
1792 | } | |
1793 | } | |
1794 | ||
1795 | static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, | |
1796 | uint64_t new_size) | |
1797 | { | |
1798 | Error *err = NULL; | |
1799 | int fd = -1; | |
1800 | void *addr = qemu_memfd_alloc("vhost-inflight", new_size, | |
1801 | F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, | |
1802 | &fd, &err); | |
1803 | ||
1804 | if (err) { | |
1805 | error_report_err(err); | |
5d33ae4b | 1806 | return -ENOMEM; |
5ad204bf XY |
1807 | } |
1808 | ||
1809 | vhost_dev_free_inflight(inflight); | |
1810 | inflight->offset = 0; | |
1811 | inflight->addr = addr; | |
1812 | inflight->fd = fd; | |
1813 | inflight->size = new_size; | |
1814 | ||
1815 | return 0; | |
1816 | } | |
1817 | ||
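| /* | |
| * Stream layout shared by save/load below: a be64 region size (0 means | |
| * no inflight region), then a be16 queue_size, then the raw region | |
| * bytes. | |
| */ | |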
1818 | void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1819 | { | |
1820 | if (inflight->addr) { | |
1821 | qemu_put_be64(f, inflight->size); | |
1822 | qemu_put_be16(f, inflight->queue_size); | |
1823 | qemu_put_buffer(f, inflight->addr, inflight->size); | |
1824 | } else { | |
1825 | qemu_put_be64(f, 0); | |
1826 | } | |
1827 | } | |
1828 | ||
1829 | int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1830 | { | |
1831 | uint64_t size; | |
1832 | ||
1833 | size = qemu_get_be64(f); | |
1834 | if (!size) { | |
1835 | return 0; | |
1836 | } | |
1837 | ||
1838 | if (inflight->size != size) { | |
5d33ae4b RK |
1839 | int ret = vhost_dev_resize_inflight(inflight, size); |
1840 | if (ret < 0) { | |
1841 | return ret; | |
5ad204bf XY |
1842 | } |
1843 | } | |
1844 | inflight->queue_size = qemu_get_be16(f); | |
1845 | ||
1846 | qemu_get_buffer(f, inflight->addr, size); | |
1847 | ||
1848 | return 0; | |
1849 | } | |
1850 | ||
1b0063b3 JY |
1851 | int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) |
1852 | { | |
1853 | int r; | |
1854 | ||
1855 | if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || | |
1856 | hdev->vhost_ops->vhost_set_inflight_fd == NULL) { | |
1857 | return 0; | |
1858 | } | |
1859 | ||
1860 | hdev->vdev = vdev; | |
1861 | ||
1862 | r = vhost_dev_set_features(hdev, hdev->log_enabled); | |
1863 | if (r < 0) { | |
5d33ae4b | 1864 | VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed"); |
1b0063b3 JY |
1865 | return r; |
1866 | } | |
1867 | ||
1868 | return 0; | |
1869 | } | |
1870 | ||
5ad204bf XY |
1871 | int vhost_dev_set_inflight(struct vhost_dev *dev, |
1872 | struct vhost_inflight *inflight) | |
1873 | { | |
1874 | int r; | |
1875 | ||
1876 | if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { | |
1877 | r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); | |
1878 | if (r) { | |
5d33ae4b RK |
1879 | VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed"); |
1880 | return r; | |
5ad204bf XY |
1881 | } |
1882 | } | |
1883 | ||
1884 | return 0; | |
1885 | } | |
1886 | ||
1887 | int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, | |
1888 | struct vhost_inflight *inflight) | |
1889 | { | |
1890 | int r; | |
1891 | ||
1892 | if (dev->vhost_ops->vhost_get_inflight_fd) { | |
1893 | r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); | |
1894 | if (r) { | |
5d33ae4b RK |
1895 | VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed"); |
1896 | return r; | |
5ad204bf XY |
1897 | } |
1898 | } | |
1899 | ||
1900 | return 0; | |
1901 | } | |
1902 | ||
4daa5054 SG |
1903 | static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) |
1904 | { | |
1905 | if (!hdev->vhost_ops->vhost_set_vring_enable) { | |
1906 | return 0; | |
1907 | } | |
1908 | ||
1909 | /* | |
1910 | * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not | |
1911 | * been negotiated, the rings start directly in the enabled state, and | |
1912 | * the .vhost_set_vring_enable callback will fail since | |
1913 | * VHOST_USER_SET_VRING_ENABLE is not supported. | |
1914 | */ | |
1915 | if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && | |
1916 | !virtio_has_feature(hdev->backend_features, | |
1917 | VHOST_USER_F_PROTOCOL_FEATURES)) { | |
1918 | return 0; | |
1919 | } | |
1920 | ||
1921 | return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); | |
1922 | } | |
1923 | ||
b0b3db79 | 1924 | /* Host notifiers must be enabled at this point. */ |
4daa5054 | 1925 | int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) |
d5970055 MT |
1926 | { |
1927 | int i, r; | |
24f4fe34 | 1928 | |
8695de0f MAL |
1929 | /* should only be called after backend is connected */ |
1930 | assert(hdev->vhost_ops); | |
1931 | ||
4daa5054 | 1932 | trace_vhost_dev_start(hdev, vdev->name, vrings); |
a2761231 | 1933 | |
c255488d | 1934 | vdev->vhost_started = true; |
24f4fe34 | 1935 | hdev->started = true; |
c471ad0e | 1936 | hdev->vdev = vdev; |
24f4fe34 | 1937 | |
d5970055 MT |
1938 | r = vhost_dev_set_features(hdev, hdev->log_enabled); |
1939 | if (r < 0) { | |
54dd9321 | 1940 | goto fail_features; |
d5970055 | 1941 | } |
c471ad0e JW |
1942 | |
1943 | if (vhost_dev_has_iommu(hdev)) { | |
375f74f4 | 1944 | memory_listener_register(&hdev->iommu_listener, vdev->dma_as); |
c471ad0e JW |
1945 | } |
1946 | ||
21e70425 | 1947 | r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); |
d5970055 | 1948 | if (r < 0) { |
5d33ae4b | 1949 | VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); |
54dd9321 | 1950 | goto fail_mem; |
d5970055 | 1951 | } |
d154e0ba | 1952 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 1953 | r = vhost_virtqueue_start(hdev, |
a9f98bb5 JW |
1954 | vdev, |
1955 | hdev->vqs + i, | |
1956 | hdev->vq_index + i); | |
d154e0ba MT |
1957 | if (r < 0) { |
1958 | goto fail_vq; | |
1959 | } | |
1960 | } | |
1961 | ||
f9a09ca3 CL |
1962 | r = event_notifier_init( |
1963 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0); | |
1964 | if (r < 0) { | |
77ece20b PP |
1965 | VHOST_OPS_DEBUG(r, "event_notifier_init failed"); |
1966 | goto fail_vq; | |
f9a09ca3 CL |
1967 | } |
1968 | event_notifier_test_and_clear( | |
1969 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); | |
1970 | if (!vdev->use_guest_notifier_mask) { | |
1971 | vhost_config_mask(hdev, vdev, true); | |
1972 | } | |
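| /* | |
| * With dirty logging enabled (i.e. during migration), size and allocate | |
| * the log buffer and hand its base address to the backend so writes to | |
| * guest memory get recorded. | |
| */ | |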
d5970055 | 1973 | if (hdev->log_enabled) { |
e05ca820 MT |
1974 | uint64_t log_base; |
1975 | ||
d5970055 | 1976 | hdev->log_size = vhost_get_log_size(hdev); |
15324404 MAL |
1977 | hdev->log = vhost_log_get(hdev->log_size, |
1978 | vhost_dev_log_is_shared(hdev)); | |
309750fa | 1979 | log_base = (uintptr_t)hdev->log->log; |
c2bea314 | 1980 | r = hdev->vhost_ops->vhost_set_log_base(hdev, |
9a78a5dd MAL |
1981 | hdev->log_size ? log_base : 0, |
1982 | hdev->log); | |
d5970055 | 1983 | if (r < 0) { |
5d33ae4b | 1984 | VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); |
54dd9321 | 1985 | goto fail_log; |
d5970055 MT |
1986 | } |
1987 | } | |
4daa5054 SG |
1988 | if (vrings) { |
1989 | r = vhost_dev_set_vring_enable(hdev, true); | |
1990 | if (r) { | |
1991 | goto fail_log; | |
1992 | } | |
1993 | } | |
ca71db43 CL |
1994 | if (hdev->vhost_ops->vhost_dev_start) { |
1995 | r = hdev->vhost_ops->vhost_dev_start(hdev, true); | |
1996 | if (r) { | |
4daa5054 | 1997 | goto fail_start; |
ca71db43 CL |
1998 | } |
1999 | } | |
3f63b4c6 JW |
2000 | if (vhost_dev_has_iommu(hdev) && |
2001 | hdev->vhost_ops->vhost_set_iotlb_callback) { | |
2002 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); | |
c471ad0e JW |
2003 | |
2004 | /* Update used ring information for IOTLB to work correctly; | |
2005 | * the vhost-kernel code requires this. */ | |
2006 | for (i = 0; i < hdev->nvqs; ++i) { | |
2007 | struct vhost_virtqueue *vq = hdev->vqs + i; | |
2008 | vhost_device_iotlb_miss(hdev, vq->used_phys, true); | |
2009 | } | |
2010 | } | |
f9a09ca3 | 2011 | vhost_start_config_intr(hdev); |
d5970055 | 2012 | return 0; |
4daa5054 SG |
2013 | fail_start: |
2014 | if (vrings) { | |
2015 | vhost_dev_set_vring_enable(hdev, false); | |
2016 | } | |
54dd9321 | 2017 | fail_log: |
24bfa207 | 2018 | vhost_log_put(hdev, false); |
d5970055 MT |
2019 | fail_vq: |
2020 | while (--i >= 0) { | |
f56a1247 | 2021 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
2022 | vdev, |
2023 | hdev->vqs + i, | |
2024 | hdev->vq_index + i); | |
d5970055 | 2025 | } |
c471ad0e | 2026 | |
54dd9321 | 2027 | fail_mem: |
1e3ffb34 PP |
2028 | if (vhost_dev_has_iommu(hdev)) { |
2029 | memory_listener_unregister(&hdev->iommu_listener); | |
2030 | } | |
54dd9321 | 2031 | fail_features: |
c255488d | 2032 | vdev->vhost_started = false; |
24f4fe34 | 2033 | hdev->started = false; |
d5970055 MT |
2034 | return r; |
2035 | } | |
2036 | ||
b0b3db79 | 2037 | /* Host notifiers must be enabled at this point. */ |
4daa5054 | 2038 | void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) |
d5970055 | 2039 | { |
a9f98bb5 | 2040 | int i; |
54dd9321 | 2041 | |
8695de0f MAL |
2042 | /* should only be called after backend is connected */ |
2043 | assert(hdev->vhost_ops); | |
f9a09ca3 CL |
2044 | event_notifier_test_and_clear( |
2045 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); | |
2046 | event_notifier_test_and_clear(&vdev->config_notifier); | |
8695de0f | 2047 | |
4daa5054 | 2048 | trace_vhost_dev_stop(hdev, vdev->name, vrings); |
a2761231 | 2049 | |
ca71db43 CL |
2050 | if (hdev->vhost_ops->vhost_dev_start) { |
2051 | hdev->vhost_ops->vhost_dev_start(hdev, false); | |
2052 | } | |
4daa5054 SG |
2053 | if (vrings) { |
2054 | vhost_dev_set_vring_enable(hdev, false); | |
2055 | } | |
d5970055 | 2056 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 2057 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
2058 | vdev, |
2059 | hdev->vqs + i, | |
2060 | hdev->vq_index + i); | |
d5970055 | 2061 | } |
c3716f26 EP |
2062 | if (hdev->vhost_ops->vhost_reset_status) { |
2063 | hdev->vhost_ops->vhost_reset_status(hdev); | |
2064 | } | |
54dd9321 | 2065 | |
c471ad0e | 2066 | if (vhost_dev_has_iommu(hdev)) { |
3f63b4c6 JW |
2067 | if (hdev->vhost_ops->vhost_set_iotlb_callback) { |
2068 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); | |
2069 | } | |
375f74f4 | 2070 | memory_listener_unregister(&hdev->iommu_listener); |
c471ad0e | 2071 | } |
f9a09ca3 | 2072 | vhost_stop_config_intr(hdev); |
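| /* | |
| * sync = true: flush any remaining dirty-log state into QEMU's bitmap | |
| * before the log reference is dropped. | |
| */ | |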
309750fa | 2073 | vhost_log_put(hdev, true); |
d5970055 | 2074 | hdev->started = false; |
c255488d | 2075 | vdev->vhost_started = false; |
c471ad0e | 2076 | hdev->vdev = NULL; |
d5970055 | 2077 | } |
950d94ba MAL |
2078 | |
2079 | int vhost_net_set_backend(struct vhost_dev *hdev, | |
2080 | struct vhost_vring_file *file) | |
2081 | { | |
2082 | if (hdev->vhost_ops->vhost_net_set_backend) { | |
2083 | return hdev->vhost_ops->vhost_net_set_backend(hdev, file); | |
2084 | } | |
2085 | ||
5d33ae4b | 2086 | return -ENOSYS; |
950d94ba | 2087 | } |