/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define BUFFER_IO_MAX_DELAY 100

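/*
 * A XenPhysmap describes a chunk of QEMU RAM (identified by its
 * phys_offset, i.e. its ram_addr_t) that has been moved into the guest
 * physical address space at start_addr via XENMAPSPACE_gmfn.  The list
 * is mirrored in xenstore (see xen_add_to_physmap()/xen_read_physmap())
 * so that the mapping survives a device-model restart.
 */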
typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

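/*
 * Per-domain state of the device model: the ioreq pages shared with Xen,
 * the event-channel ports used to receive ioreq notifications, the
 * xenstore connection, and the memory listener/physmap bookkeeping.
 */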
typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
} XenIOState;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

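/*
 * Mirror guest writes to the PIIX3 PCI link routing registers (config
 * space offsets 0x60-0x63) into Xen, which owns PCI INTx routing for HVM
 * guests.  A value with the "routing disabled" bit (0x80) set is
 * forwarded as 0, i.e. the link is not routed.
 */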
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

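/*
 * Carve the guest RAM into the regions QEMU expects: the first 640k, the
 * RAM below 4G (minus the VGA hole at 0xa0000-0xbffff) and, when needed,
 * the RAM above 4G.  A worked example, assuming the usual Xen values
 * HVM_BELOW_4G_RAM_END == 0xf0000000 and HVM_BELOW_4G_MMIO_LENGTH ==
 * 0x10000000 (assumptions -- see xen/hvm/e820.h), for a 6 GiB guest
 * (ram_size == 0x180000000):
 *
 *   block_len    = 0x190000000  (RAM block grows by the MMIO hole size)
 *   xen.ram.640k = [0x00000000, 0x0009ffff]
 *   xen.ram.lo   = [0x000c0000, 0xefffffff]
 *   xen.ram.hi   = [0x100000000, 0x18fffffff]
 */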
static void xen_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
    ram_addr_t block_len;

    block_len = ram_size;
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        /* Xen does not allocate the memory contiguously: it keeps a hole of
         * HVM_BELOW_4G_MMIO_LENGTH bytes at HVM_BELOW_4G_MMIO_START.
         */
        block_len += HVM_BELOW_4G_MMIO_LENGTH;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
    vmstate_register_ram_global(&ram_memory);

    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
    } else {
        below_4g_mem_size = ram_size;
    }

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA I/O memory space: it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

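/*
 * Find the physmap entry whose guest-physical range covers start_addr,
 * or NULL if the address has not been remapped.
 */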
static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

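/*
 * Translate a QEMU ram_addr_t (phys_offset) back to the guest physical
 * address it has been remapped to.  This is registered as the mapcache
 * translation callback (see xen_map_cache_init() in xen_hvm_init());
 * addresses that were never remapped are returned unchanged.
 */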
static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", from "
            "%"HWADDR_PRIx"\n", phys_offset, phys_offset + size, start_addr);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

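/*
 * MemoryListener callback: propagate a RAM section (other than the main
 * RAM block) that appeared or disappeared in the guest address space to
 * Xen.  Plain RAM sections are relocated via xen_add_to_physmap() and
 * xen_remove_from_physmap(); ROM sections are only marked read-only
 * (HVMMEM_ram_ro) in Xen.
 */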
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ((log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
}

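/*
 * Ask Xen for the dirty-vram bitmap of the tracked range and mark the
 * corresponding pages of the framebuffer region dirty in QEMU.  Xen can
 * track only one range at a time, so the first range that asks becomes
 * the tracked one and later requests for other ranges are ignored.
 */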
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}

/* Get the ioreq packet from shared memory for the given vcpu. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a pending port notification and return the
 * corresponding ioreq packet, or NULL if nothing is pending (or the
 * notification was for buffered I/O).
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}


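/*
 * Handle a port I/O request: issue the in/out on QEMU's I/O port space
 * and, for "rep" requests where data_is_ptr is set, copy each item
 * to/from the guest memory buffer described by req->data.
 */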
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

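/*
 * Handle an IOREQ_TYPE_COPY (MMIO-style) request: move data between the
 * guest address req->addr and either the immediate value in req->data or,
 * when data_is_ptr is set, the guest memory buffer that req->data points to.
 */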
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

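/*
 * Dispatch a single ioreq according to its type.  For direct (non
 * data_is_ptr) writes narrower than a target long, the data is masked
 * down to the request size first so stale upper bits are not forwarded
 * to the handlers.
 */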
static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

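/*
 * Drain the buffered ioreq ring shared with Xen.  Each slot describes a
 * small request; 8-byte requests occupy two consecutive slots, with the
 * second slot carrying the upper 32 bits of the data.
 */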
static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    } else {
        qemu_del_timer(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

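/*
 * fd handler for the event-channel file descriptor: service any buffered
 * requests, then fetch and handle the pending synchronous ioreq (if any),
 * honour pending shutdown/reset requests, and finally notify Xen that the
 * response is ready.
 */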
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

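/*
 * Publish the pty path of a character device under the domain's xenstore
 * path (e.g. <domain-path>/console/tty) so that the toolstack and console
 * clients can find the device model's pty.
 */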
static int store_dev_info(int domid, CharDriverState *cs, const char *string)
{
    struct xs_handle *xs = NULL;
    char *path = NULL;
    char *newpath = NULL;
    char *pts = NULL;
    int ret = -1;

    /* Only continue if we're talking to a pty. */
    if (strncmp(cs->filename, "pty:", 4)) {
        return 0;
    }
    pts = cs->filename + 4;

    /* We now have everything we need to set the xenstore entry. */
    xs = xs_open(0);
    if (xs == NULL) {
        fprintf(stderr, "Could not contact XenStore\n");
        goto out;
    }

    path = xs_get_domain_path(xs, domid);
    if (path == NULL) {
        fprintf(stderr, "xs_get_domain_path() error\n");
        goto out;
    }
    newpath = realloc(path, (strlen(path) + strlen(string) +
                             strlen("/tty") + 1));
    if (newpath == NULL) {
        fprintf(stderr, "realloc error\n");
        goto out;
    }
    path = newpath;

    strcat(path, string);
    strcat(path, "/tty");
    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
        fprintf(stderr, "xs_write for '%s' failed\n", string);
        goto out;
    }
    ret = 0;

out:
    free(path);
    xs_close(xs);

    return ret;
}

void xenstore_store_pv_console_info(int i, CharDriverState *chr)
{
    if (i == 0) {
        store_dev_info(xen_domid, chr, "/console");
    } else {
        char buf[32];
        snprintf(buf, sizeof(buf), "/device/console/%d", i);
        store_dev_info(xen_domid, chr, buf);
    }
}

static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
    char path[50];

    if (xs == NULL) {
        fprintf(stderr, "xenstore connection not initialized\n");
        exit(1);
    }

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


/* Initialise Xen */

static void xen_change_state_handler(void *opaque, int running,
                                     RunState state)
{
    if (running) {
        /* record state running */
        xenstore_record_dm_state(xenstore, "running");
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;
    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);

    return 0;
}

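/*
 * Rebuild the physmap list from the entries that a previous incarnation
 * of the device model recorded in xenstore (see xen_add_to_physmap()), so
 * the mappings are known again after a device-model restart or on the
 * migration target.
 */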
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

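/*
 * Bring up the HVM device-model side of Xen support: open the event
 * channel and xenstore connections, map the synchronous and buffered
 * ioreq pages, bind one event channel per vcpu plus one for buffered I/O,
 * initialise RAM and the memory listener, and register the paravirtual
 * backends.
 */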
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        g_free(state);
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        g_free(state);
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

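/*
 * Tell Xen which guest pages QEMU has modified behind the guest's back
 * (e.g. by device emulation writing into guest RAM), so that those pages
 * are marked dirty and re-sent during live migration.
 */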
void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}