/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"
#include "qmp-commands.h"

#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"
#include "exec-memory.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define BUFFER_IO_MAX_DELAY 100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

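/*
 * Per-domain device-model state: the shared and buffered ioreq pages
 * mapped from the guest, the event channel ports used to receive ioreq
 * notifications (one per vcpu plus one for buffered I/O), the xenstore
 * connection, and the list of physmap entries mirrored to xenstore.
 */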
typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
} XenIOState;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

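/*
 * Lay out the guest RAM seen by QEMU: a single "xen.ram" block with
 * aliases for the first 640k, for the area between 0xc0000 and the top
 * of low memory, and for memory above 4GB, leaving the MMIO hole below
 * 4GB uncovered.
 */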
static void xen_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
    ram_addr_t block_len;

    block_len = ram_size;
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        /* Xen does not allocate the memory continuously, but keeps a hole
         * of length HVM_BELOW_4G_MMIO_LENGTH at HVM_BELOW_4G_MMIO_START.
         */
        block_len += HVM_BELOW_4G_MMIO_LENGTH;
    }
    memory_region_init_ram(&ram_memory, "xen.ram", block_len);
    vmstate_register_ram_global(&ram_memory);

    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
    } else {
        below_4g_mem_size = ram_size;
    }

    memory_region_init_alias(&ram_640k, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, "xen.ram.lo",
                             &ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

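/*
 * Ask Xen to populate guest physical memory backing a newly allocated
 * RAM region.  Allocations against the main "xen.ram" block and
 * allocations made during an incoming migration are skipped, since
 * those pages already exist in the guest.
 */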
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
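/*
 * Remap a RAM-backed MemoryRegion (in practice the linear framebuffer)
 * into the guest physical address space with XENMAPSPACE_gmfn, record
 * the mapping on the physmap list, and mirror it to xenstore under
 * /local/domain/0/device-model/<domid>/physmap/ so that
 * xen_read_physmap() can find it again on the next start.
 */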
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

369 | DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", from ", | |
370 | "%"HWADDR_PRIx"\n", phys_offset, phys_offset + size, start_addr); | |

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

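/*
 * MemoryListener callback shared by xen_region_add and xen_region_del.
 * Sections of the main "xen.ram" block are left alone; other RAM
 * sections are remapped into the guest with xen_add_to_physmap() (ROMs
 * are instead marked read-only in the hypervisor) when they are added
 * with dirty logging enabled, and torn down again with
 * xen_remove_from_physmap() when they are removed.
 */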
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ( (log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
}

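/*
 * Pull the dirty-VRAM bitmap for the tracked framebuffer range from the
 * hypervisor and forward it to the memory API, page by page.  Xen can
 * only track one range at a time, so the first physmap entry that asks
 * for logging becomes log_for_dirtybit and all others are ignored.
 */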
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        };
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}

/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUArchState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
    /* if rtc_clock is left to default (host_clock), disable it */
    if (rtc_clock == host_clock) {
        qemu_clock_enable(rtc_clock, false);
    }
}

/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use the pending event channel notification to find out which vcpu
 * issued an I/O request.  Returns the ioreq from shared memory, or NULL
 * if the notification was for buffered I/O or nothing was pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                cpu_physical_memory_read(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t*) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t*) &tmp, req->size);
                cpu_physical_memory_write(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t*) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t*) &tmp, req->size);
                cpu_physical_memory_write(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t*) &tmp, req->size);
            }
        }
    }
}

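/*
 * Dispatch a single I/O request to the right handler: port I/O, memory
 * copy, time-offset updates (ignored) or mapcache invalidation.  Partial
 * writes are masked down to the request size first.
 */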
static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

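/*
 * Drain the buffered ioreq ring shared with Xen.  Each slot is expanded
 * into a normal ioreq with no data pointer; 8-byte accesses occupy two
 * consecutive slots, with the high 32 bits of the data in the second
 * slot.
 */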
static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    } else {
        qemu_del_timer(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

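/*
 * Event channel fd handler: service any buffered requests, then fetch
 * and complete the pending synchronous ioreq for the signalling vcpu.
 * Shutdown and reset requests are acted on before the response is sent
 * so that the toolstack sees them before the guest resumes.
 */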
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static int store_dev_info(int domid, CharDriverState *cs, const char *string)
{
    struct xs_handle *xs = NULL;
    char *path = NULL;
    char *newpath = NULL;
    char *pts = NULL;
    int ret = -1;

    /* Only continue if we're talking to a pty. */
    if (strncmp(cs->filename, "pty:", 4)) {
        return 0;
    }
    pts = cs->filename + 4;

    /* We now have everything we need to set the xenstore entry. */
    xs = xs_open(0);
    if (xs == NULL) {
        fprintf(stderr, "Could not contact XenStore\n");
        goto out;
    }

    path = xs_get_domain_path(xs, domid);
    if (path == NULL) {
        fprintf(stderr, "xs_get_domain_path() error\n");
        goto out;
    }
    newpath = realloc(path, (strlen(path) + strlen(string) +
                             strlen("/tty") + 1));
    if (newpath == NULL) {
        fprintf(stderr, "realloc error\n");
        goto out;
    }
    path = newpath;

    strcat(path, string);
    strcat(path, "/tty");
    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
        fprintf(stderr, "xs_write for '%s' fail", string);
        goto out;
    }
    ret = 0;

out:
    free(path);
    xs_close(xs);

    return ret;
}

void xenstore_store_pv_console_info(int i, CharDriverState *chr)
{
    if (i == 0) {
        store_dev_info(xen_domid, chr, "/console");
    } else {
        char buf[32];
        snprintf(buf, sizeof(buf), "/device/console/%d", i);
        store_dev_info(xen_domid, chr, buf);
    }
}

static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
    char path[50];

    if (xs == NULL) {
        fprintf(stderr, "xenstore connection not initialized\n");
        exit(1);
    }

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


/* Initialise Xen */

static void xen_change_state_handler(void *opaque, int running,
                                     RunState state)
{
    if (running) {
        /* record state running */
        xenstore_record_dm_state(xenstore, "running");
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;
    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);

    return 0;
}

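/*
 * Rebuild the physmap list from the xenstore entries written by
 * xen_add_to_physmap(), so that mappings set up by a previous instance
 * of the device model (for example before a migration) are known again.
 */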
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL)
        return;

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

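/*
 * Main HVM device-model setup: open the event channel interface and
 * xenstore, map the shared and buffered ioreq pages, bind one event
 * channel per vcpu plus the buffered-I/O channel, initialise the
 * mapcache and RAM layout, register the memory listener and the Xen
 * backend drivers, and finally restore any saved physmap entries.
 */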
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

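/*
 * During migration, tell Xen which guest pages the device model has
 * written to so that they are marked dirty and re-sent; outside of
 * migration this is a no-op.
 */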
void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}