/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/southbridge/piix.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "sysemu/xen-mapcache.h"

#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/e820.h>
//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;
/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
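/*
 * Maximum delay, in milliseconds, before the buffered-io timer drains the
 * buffered ioreq ring again; handle_buffered_io() below arms the timer
 * against QEMU_CLOCK_REALTIME with this value.
 */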
#define BUFFER_IO_MAX_DELAY  100
typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

static QLIST_HEAD(, XenPhysmap) xen_physmap;
typedef struct XenPciDevice {
    PCIDevice *pci_dev;
    uint32_t sbdf;
    QLIST_ENTRY(XenPciDevice) entry;
} XenPciDevice;
typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn remote and local ports for buffered io */
    evtchn_port_t bufioreq_remote_port;
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    QLIST_HEAD(, XenPciDevice) dev_list;
    DeviceListener device_listener;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;
    /* Buffer used by xen_sync_dirty_bitmap */
    unsigned long *dirty_bitmap;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;
/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}
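/*
 * The formula above is the standard PCI slot/pin swizzle: the returned
 * number encodes slot * 4 + pin, with the slot taken from devfn bits 7:3.
 * For example, a device in slot 2 asserting INTA (irq_num 0) yields 8,
 * and INTB yields 9; xen_piix3_set_irq() below decodes that number back
 * into a (device, intx) pair for Xen.
 */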
void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
            xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v);
        }
    }
}
int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}
void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}
static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}
/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}
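/*
 * The 16 qemu_irq lines allocated above correspond to ISA IRQs 0-15;
 * raising one simply forwards the level to Xen via xen_set_isa_irq_level(),
 * since for HVM guests the PIC/IOAPIC are emulated by Xen rather than QEMU.
 */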
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 X86_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }

    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously; it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
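/*
 * Resulting guest-physical layout, summarizing the aliases built above into
 * the single "xen.ram" block:
 *
 *   0x00000000 - 0x0009ffff   xen.ram.640k (alias at offset 0)
 *   0x000a0000 - 0x000bffff   hole: VGA IO memory, registered later
 *   0x000c0000 - below_4g     xen.ram.lo   (alias at offset 0xc0000)
 *   0x100000000 and up        xen.ram.hi   (alias at offset 4GiB), if any
 */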
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}
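/*
 * The reverse translation: given an offset into a QEMU RAM block, find the
 * guest-physical address the range was relocated to.  Used e.g. by the
 * mapcache (see xen_map_cache_init() below) and by xen_hvm_modified_memory()
 * during migration.
 */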
static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
    hwaddr addr = phys_offset & TARGET_PAGE_MASK;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}
#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof(XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now when we have a physmap entry we can replace a dummy mapping with
         * a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    nr_pages = size >> TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        return -saved_errno;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        return -saved_errno;
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}
static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}
static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}
static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}
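/*
 * The dev_list maintained by the realize/unrealize hooks above is what lets
 * cpu_ioreq_config() below resolve the SBDF carried in a PCI_CONFIG ioreq
 * back to a concrete PCIDevice for config-space emulation.
 */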
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, state->dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = state->dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}
static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}
static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};
/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}
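/*
 * Ioreq life cycle, per the Xen ioreq protocol as used in this file: the
 * guest vcpu posts a request as STATE_IOREQ_READY and Xen signals the bound
 * event channel; we mark it STATE_IOREQ_INPROCESS while emulating, and flip
 * it to STATE_IORESP_READY before notifying the port again (see
 * cpu_handle_ioreq() below) so Xen can resume the vcpu.
 */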
/*
 * Use poll to get the port notification.
 * Returns the pending ioreq packet, or NULL if there is none.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04x %lx", addr, size);
    }
}
/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
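/*
 * For PCI_CONFIG ioreqs, req->addr packs the target: the PCI
 * segment/bus/device/function (SBDF) in the upper 32 bits and the
 * config-space register offset in the lower 32 bits, as decoded below.
 */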
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}
static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    case IOREQ_TYPE_PCI_CONFIG:
        cpu_ioreq_config(state, req);
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
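/*
 * The buffered-io page is a ring shared with Xen, typically carrying posted
 * writes that need no reply (e.g. VGA traffic).  read_pointer/write_pointer
 * are free-running indices into IOREQ_BUFFER_SLOT_NUM slots; the barriers
 * and the re-read of read_pointer below guard against torn or concurrent
 * updates, and 8-byte requests span two consecutive slots, with the high
 * half in the second slot's data field.
 */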
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}
static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}
static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}
static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}
#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif
static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xenforeignmemory_resource_handle *fres;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + TARGET_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}
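/*
 * Top-level initialisation for a Xen HVM guest: open the event channel and
 * xenstore handles, create and map the ioreq server, bind one event channel
 * per vcpu plus one for buffered io, then set up guest RAM, the
 * memory/io/device listeners, and the legacy backends.
 */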
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int i, rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    xen_bus_init();

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}
void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            perror("xendevicemodel_shutdown failed");
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}
void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length);

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}
void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}