/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;

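/* Walk the shared dirty log over the intersection of a memory section
 * ([mfirst, mlast]) and a vhost region or ring ([rfirst, rlast]); each set
 * bit marks one VHOST_LOG_PAGE of guest memory dirty. With the usual
 * definitions in hw/virtio/vhost.h (4 KiB log pages tracked in 64-bit
 * chunks), one vhost_log_chunk_t covers 64 * 4 KiB = 256 KiB of guest
 * physical memory. */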
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

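/* Sync dirty pages in [first, last] into QEMU's bitmap for every vhost
 * memory region and every virtqueue used ring that overlaps the given
 * section. */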
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

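/* Example of the unassign cases below: removing [0x2000, 0x2fff] from an
 * existing region [0x1000, 0x3fff] is the "split" case and produces
 * [0x1000, 0x1fff] plus [0x3000, 0x3fff]; removing [0x3000, 0x4fff] from
 * the same region is the "shrink" case and leaves [0x1000, 0x2fff]. */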
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

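/* The log needs one bit per VHOST_LOG_PAGE for every address the device
 * can write, so its size is driven by the highest guest physical address
 * covered by any memory region or used ring: for a range whose last byte
 * is `last`, that is last / VHOST_LOG_CHUNK + 1 chunks. */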
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size)
{
    struct vhost_log *log = g_malloc0(sizeof *log + size * sizeof(*(log->log)));

    log->size = size;
    log->refcnt = 1;

    return log;
}

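/* The dirty log is shared between vhost devices through the global
 * vhost_log pointer and reference-counted: devices asking for the same
 * size share one allocation, while a size change allocates a new log and
 * leaves existing users holding their old one until they drop it. */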
static struct vhost_log *vhost_log_get(uint64_t size)
{
    if (!vhost_log || vhost_log->size != size) {
        vhost_log = vhost_log_alloc(size);
    } else {
        ++vhost_log->refcnt;
    }

    return vhost_log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }
        if (vhost_log == log) {
            vhost_log = NULL;
        }
        g_free(log);
    }
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size);
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

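/* The listener batches changes between begin and commit: vhost_set_memory
 * only records the dirtied address range, and the expensive
 * VHOST_SET_MEM_TABLE call plus any log resizing happen once per
 * transaction here. */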
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

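/* Hand the backend the userspace addresses of one virtqueue's descriptor,
 * avail and used rings. log_guest_addr is the guest physical address of
 * the used ring, which the backend needs in order to log its own used-ring
 * writes when the VHOST_VRING_F_LOG flag is set. */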
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

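/* Enabling logging on a running device is a two-step dance: grow the log
 * first (a log that is too large is harmless, one that is too small is
 * not), then re-program features and ring addresses with logging enabled
 * via vhost_dev_set_log. Disabling reverses the order: stop logging first,
 * then drop the log. */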
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}

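/* Bring one virtqueue under vhost control: program its size and last
 * available index, map the descriptor/avail/used rings into QEMU so they
 * can be verified and unmapped later, pass their addresses to the backend,
 * and wire up the host notifier as the kick eventfd. */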
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = dev->vhost_ops->vhost_call(dev, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_call(dev, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

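/* One-time device setup: pick a backend, become its owner, query features,
 * create the per-queue masked notifiers and register the memory listener
 * that feeds the backend its memory table. A device whose backend lacks
 * VHOST_F_LOG_ALL cannot log writes, so it registers a migration blocker
 * instead. */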
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, bool force)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

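/* Feature negotiation helpers: vhost_get_features masks out bits the
 * backend does not support, and vhost_ack_features records the bits the
 * guest actually accepted so vhost_dev_set_features can program them. */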
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        unsigned features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        unsigned bit_mask = (1 << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

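/* Start order matters below: features and the memory table must be
 * programmed before the rings, and the log (if any) is sized and
 * registered last, once every region and ring it must cover is known.
 * The error path unwinds in reverse. */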
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size);
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE,
                                        hdev->log_size ? &log_base : NULL);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    if (hdev->log_size) {
        vhost_log_put(hdev, false);
    }
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}