/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h.
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

static QLIST_HEAD(, XenPhysmap) xen_physmap;

typedef struct XenPciDevice {
    PCIDevice *pci_dev;
    uint32_t sbdf;
    QLIST_ENTRY(XenPciDevice) entry;
} XenPciDevice;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn remote and local ports for buffered io */
    evtchn_port_t bufioreq_remote_port;
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    QLIST_HEAD(, XenPciDevice) dev_list;
    DeviceListener device_listener;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;
    /* Buffer used by xen_sync_dirty_bitmap */
    unsigned long *dirty_bitmap;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen-specific functions for PIIX PCI */

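/*
 * The value returned below packs the PCI slot number into bits 2 and
 * up and leaves the INTx pin number in bits 0-1, so that
 * xen_piix3_set_irq() can recover them again with irq_num >> 2 and
 * irq_num & 3.  As a worked example, slot 3 asserting INTA (irq_num 0)
 * yields 0 + (3 << 2) = 12, i.e. device 3, pin A.  The PIRQ link
 * routing itself is applied by Xen, driven by the PIIX3 config
 * registers forwarded in xen_piix_pci_write_config_client() below.
 */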
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

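/*
 * The PIIX3 keeps its PIRQA-D route control registers at config
 * offsets 0x60-0x63: as the scan below assumes, the low nibble selects
 * the ISA IRQ and bit 7 marks the link as disabled (folded to IRQ 0
 * here).  Any config write that overlaps that window is forwarded to
 * Xen so the hypervisor's view of the link routing stays in sync with
 * the emulated PIIX3.
 */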
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xen_set_pci_link_route(xen_domid, address + i - 0x60, v);
        }
    }
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If the vector is 0, the MSI is remapped into a pirq, which is
     * passed as dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

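/*
 * Guest RAM is carved out of a single "xen.ram" block: everything up
 * to below_4g_mem_size is mapped at guest address 0 (via the 640k and
 * 0xc0000+ aliases created below) and any remainder is aliased at the
 * 4GiB boundary, leaving the PCI hole in between.  Roughly:
 *
 *   0x000000000 .. 0x0000a0000  xen.ram.640k
 *   0x0000c0000 .. below_4g     xen.ram.lo  (0xa0000-0xc0000 is VGA)
 *   below_4g    .. 0x100000000  memory hole (no RAM)
 *   0x100000000 .. ...          xen.ram.hi  (above_4g_mem_size bytes)
 */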
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_uint(qdev_get_machine(),
                                                    PC_MACHINE_MAX_RAM_BELOW_4G,
                                                    &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        pcms->above_4g_mem_size = ram_size - user_lowmem;
        pcms->below_4g_mem_size = user_lowmem;
    } else {
        pcms->above_4g_mem_size = 0;
        pcms->below_4g_mem_size = ram_size;
    }
    if (!pcms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + pcms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             pcms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (pcms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 pcms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0,
                                         pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

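/*
 * The physmap list records guest-physical ranges (in practice the
 * videoram) whose backing pages were relocated away from the
 * QEMU ram_addr_t offset they were originally populated at.
 * get_physmapping() looks an entry up by guest address, while
 * xen_phys_offset_to_gaddr() translates in the other direction,
 * falling back to the identity when no entry covers the offset.
 */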
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
    hwaddr addr = phys_offset & TARGET_PAGE_MASK;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}

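/*
 * In the XEN_COMPAT_PHYSMAP world the physmap is persisted in xenstore
 * so that it survives migration; xen_read_physmap() below reloads it.
 * Each entry is keyed by its phys_offset, e.g. for domain 1 and an
 * entry at offset 0x8000000 (the concrete values here are purely an
 * illustration):
 *
 *  /local/domain/0/device-model/1/physmap/8000000/start_addr = "f0000000"
 *  /local/domain/0/device-model/1/physmap/8000000/size       = "800000"
 *  /local/domain/0/device-model/1/physmap/8000000/name       = "vga.vram"
 */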
#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif

static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram, and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof(XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now that we have a physmap entry we can replace the dummy mapping
         * with a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    nr_pages = size >> TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n",
                    start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

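/*
 * Ask Xen for the VRAM pages dirtied since the last call and feed them
 * into QEMU's dirty tracking.  Xen returns one bit per guest page; the
 * scan below walks the bitmap one word at a time, using ctzl() to find
 * each set bit, so bit j of word i maps to byte offset
 * (i * width + j) * TARGET_PAGE_SIZE within the framebuffer region.
 */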
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, state->dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = state->dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

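/*
 * ioreq lifecycle, as seen from this device model: Xen marks a vcpu's
 * slot in the shared page STATE_IOREQ_READY and kicks our event
 * channel; we move it to STATE_IOREQ_INPROCESS while emulating, and
 * finally to STATE_IORESP_READY before notifying the channel again
 * (see cpu_handle_ioreq()).  The barriers around the state reads and
 * writes keep the two sides' views of the slot consistent.
 */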
/* Get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* Poll the event channel port and return the pending ioreq packet,
 * or NULL if there is none.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}

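/*
 * Handle a port I/O request.  req->data_is_ptr selects between the
 * immediate form (data read from / written into req->data itself) and
 * the indirect form, where req->data is a guest-physical address and
 * req->count repetitions are bounced one item at a time through
 * {read,write}_phys_req_item(), which honours the direction flag
 * req->df.
 */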
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

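/*
 * Handle an MMIO copy request.  The immediate form moves up to
 * sizeof(req->data) bytes between guest memory at req->addr and
 * req->data; the indirect form copies req->count items of req->size
 * bytes between two guest-physical ranges (req->addr and req->data),
 * bounced through a local temporary.
 */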
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

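/*
 * Handle a forwarded PCI config-space access.  Xen packs the target
 * segment/bus/device/function into the upper 32 bits of req->addr
 * (matched as an SBDF against the list built by xen_device_realize())
 * and the register offset into the lower bits.
 */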
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}

static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG:
            cpu_ioreq_config(state, req);
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

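/*
 * Drain the buffered-ioreq ring.  Xen is the producer and we are the
 * sole consumer: write_pointer is sampled after read_pointer with read
 * barriers in between, and a slot is only consumed once both halves of
 * a quad-word request are visible.  read_pointer is advanced with an
 * atomic add, which is what lets Xen reuse the slots.  Buffered
 * requests carry no completion notification, so only posted writes
 * travel this path (as the asserts below check).
 */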
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                            handle_buffered_io, state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

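/*
 * Map the shared and buffered ioreq pages.  Recent Xen exposes both as
 * a single two-frame resource (bufioreq in frame 0, ioreq in frame 1,
 * as the build-time asserts below check); older hypervisors only hand
 * out the PFNs via xen_get_ioreq_server_info(), so each page that the
 * resource mapping did not provide is foreign-mapped individually as
 * a fallback.
 */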
static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xenforeignmemory_resource_handle *fres;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + TARGET_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}

void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int i, rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    xen_bus_init();

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

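/*
 * Shut the domain down (or reboot it) on QEMU's initiative.
 * xendevicemodel_shutdown() is tried first; hypervisors too old to
 * know it fail with ENOTTY, in which case we fall back to opening a
 * plain xenctrl handle and issuing xc_domain_shutdown() ourselves.
 */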
void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            perror("xendevicemodel_shutdown failed");
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length);

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}